2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Relative cost assigned to a branch when sizing a method for inlining
 * (presumably consumed by the inline heuristics — confirm at use sites). */
73 #define BRANCH_COST 10
/* Upper bound on the cost/size of a method considered for inlining. */
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 * Instruction metadata
161 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
162 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
168 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
173 /* keep in sync with the enum in mini.h */
176 #include "mini-ops.h"
181 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
182 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
184 * This should contain the index of the last sreg + 1. This is not the same
185 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
187 const gint8 ins_sreg_counts[] = {
188 #include "mini-ops.h"
193 #define MONO_INIT_VARINFO(vi,id) do { \
194 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public thin wrappers over the internal virtual-register allocators
 * (alloc_ireg & co.).  NOTE(review): return-type lines and braces are not
 * visible in this chunk of the file.
 */
/* Allocate a fresh integer vreg for CFG. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
/* Allocate a fresh 64-bit (long) vreg for CFG. */
206 mono_alloc_lreg (MonoCompile *cfg)
208 return alloc_lreg (cfg);
/* Allocate a fresh floating-point vreg for CFG. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg for CFG. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a destination vreg whose kind matches STACK_TYPE. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification: ref > managed pointer > plain int. */
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_get_underlying_type (type);
275 switch (type->type) {
288 case MONO_TYPE_FNPTR:
290 case MONO_TYPE_CLASS:
291 case MONO_TYPE_STRING:
292 case MONO_TYPE_OBJECT:
293 case MONO_TYPE_SZARRAY:
294 case MONO_TYPE_ARRAY:
298 #if SIZEOF_REGISTER == 8
304 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
307 case MONO_TYPE_VALUETYPE:
308 if (type->data.klass->enumtype) {
309 type = mono_class_enum_basetype (type->data.klass);
312 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
315 case MONO_TYPE_TYPEDBYREF:
317 case MONO_TYPE_GENERICINST:
318 type = &type->data.generic_class->container_class->byval_arg;
322 g_assert (cfg->gshared);
323 if (mini_type_var_is_vt (type))
326 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
328 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
334 mono_print_bb (MonoBasicBlock *bb, const char *msg)
339 printf ("\n%s %d: [IN: ", msg, bb->block_num);
340 for (i = 0; i < bb->in_count; ++i)
341 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
343 for (i = 0; i < bb->out_count; ++i)
344 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
346 for (tree = bb->code; tree; tree = tree->next)
347 mono_print_ins_index (-1, tree);
/*
 * Fill in the cached helper-call signatures declared above
 * (helper_sig_domain_get, helper_sig_rgctx_lazy_fetch_trampoline).
 * Presumably invoked once during JIT initialization — confirm caller.
 */
351 mono_create_helper_signatures (void)
/* "ptr": returns a pointer, takes no arguments. */
353 helper_sig_domain_get = mono_create_icall_signature ("ptr");
/* "ptr ptr": returns a pointer, takes one pointer argument. */
354 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
/*
 * Debugging hook called when unverifiable IL is encountered.  Acts only
 * when the 'break_on_unverified' debug option is set; the statement
 * guarded by the 'if' is not visible in this chunk (likely a breakpoint
 * trap — confirm against the full file).
 */
357 static MONO_NEVER_INLINE void
358 break_on_unverified (void)
360 if (mini_get_debug_options ()->break_on_unverified)
/*
 * Mark the compilation CFG as failed with MONO_EXCEPTION_METHOD_ACCESS:
 * CIL_METHOD is not accessible from METHOD.  Formats a human-readable
 * exception message; the message is owned by CFG, the temporary name
 * strings are freed here.
 */
364 static MONO_NEVER_INLINE void
365 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
367 char *method_fname = mono_method_full_name (method, TRUE);
368 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
370 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
371 g_free (method_fname);
372 g_free (cil_method_fname);
/*
 * Mark the compilation CFG as failed with MONO_EXCEPTION_FIELD_ACCESS:
 * FIELD is not accessible from METHOD.  Mirrors method_access_failure()
 * above; the formatted message is owned by CFG.
 */
375 static MONO_NEVER_INLINE void
376 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *field_fname = mono_field_full_name (field);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
381 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
382 g_free (method_fname);
383 g_free (field_fname);
/*
 * Record that inlining failed for the reason described by MSG and mark
 * CFG with MONO_EXCEPTION_INLINE_FAILED.  The diagnostic is printed only
 * at verbosity >= 2.  (Called via the INLINE_FAILURE macro above.)
 */
386 static MONO_NEVER_INLINE void
387 inline_failure (MonoCompile *cfg, const char *msg)
389 if (cfg->verbose_level >= 2)
390 printf ("inline failed: %s\n", msg);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * Record that gsharedvt (generic sharing for valuetypes) compilation
 * failed at OPCODE (FILE:LINE) and mark CFG with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED.  Unlike gshared_failure(), the
 * formatted message is stored on CFG (which owns it) and echoed at
 * verbosity >= 2.
 */
402 static MONO_NEVER_INLINE void
403 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
406 if (cfg->verbose_level >= 2)
407 printf ("%s\n", cfg->exception_message);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
412 * When using gsharedvt, some instantiations might be verifiable and some might not be. E.g.
413 * foo<T> (int i) { ldarg.0; box T; }
415 #define UNVERIFIED do { \
416 if (cfg->gsharedvt) { \
417 if (cfg->verbose_level > 2) \
418 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
420 goto exception_exit; \
422 break_on_unverified (); \
426 #define GET_BBLOCK(cfg,tblock,ip) do { \
427 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
430 NEW_BBLOCK (cfg, (tblock)); \
431 (tblock)->cil_code = (ip); \
432 ADD_BBLOCK (cfg, (tblock)); \
436 #if defined(TARGET_X86) || defined(TARGET_AMD64)
437 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
438 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
439 (dest)->dreg = alloc_ireg_mp ((cfg)); \
440 (dest)->sreg1 = (sr1); \
441 (dest)->sreg2 = (sr2); \
442 (dest)->inst_imm = (imm); \
443 (dest)->backend.shift_amount = (shift); \
444 MONO_ADD_INS ((cfg)->cbb, (dest)); \
448 /* Emit conversions so both operands of a binary opcode are of the same type */
450 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
452 MonoInst *arg1 = *arg1_ref;
453 MonoInst *arg2 = *arg2_ref;
456 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
457 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
460 /* Mixing r4/r8 is allowed by the spec */
461 if (arg1->type == STACK_R4) {
462 int dreg = alloc_freg (cfg);
464 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
465 conv->type = STACK_R8;
469 if (arg2->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
473 conv->type = STACK_R8;
479 #if SIZEOF_REGISTER == 8
480 /* FIXME: Need to add many more cases */
481 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
484 int dr = alloc_preg (cfg);
485 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
486 (ins)->sreg2 = widen->dreg;
491 #define ADD_BINOP(op) do { \
492 MONO_INST_NEW (cfg, ins, (op)); \
494 ins->sreg1 = sp [0]->dreg; \
495 ins->sreg2 = sp [1]->dreg; \
496 type_from_op (cfg, ins, sp [0], sp [1]); \
498 /* Have to insert a widening op */ \
499 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
500 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
501 MONO_ADD_INS ((cfg)->cbb, (ins)); \
502 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
505 #define ADD_UNOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 type_from_op (cfg, ins, sp [0], NULL); \
511 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
512 MONO_ADD_INS ((cfg)->cbb, (ins)); \
513 *sp++ = mono_decompose_opcode (cfg, ins); \
516 #define ADD_BINCOND(next_block) do { \
519 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
520 cmp->sreg1 = sp [0]->dreg; \
521 cmp->sreg2 = sp [1]->dreg; \
522 type_from_op (cfg, cmp, sp [0], sp [1]); \
524 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
525 type_from_op (cfg, ins, sp [0], sp [1]); \
526 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
527 GET_BBLOCK (cfg, tblock, target); \
528 link_bblock (cfg, cfg->cbb, tblock); \
529 ins->inst_true_bb = tblock; \
530 if ((next_block)) { \
531 link_bblock (cfg, cfg->cbb, (next_block)); \
532 ins->inst_false_bb = (next_block); \
533 start_new_bblock = 1; \
535 GET_BBLOCK (cfg, tblock, ip); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_false_bb = tblock; \
538 start_new_bblock = 2; \
540 if (sp != stack_start) { \
541 handle_stack_args (cfg, stack_start, sp - stack_start); \
542 CHECK_UNVERIFIABLE (cfg); \
544 MONO_ADD_INS (cfg->cbb, cmp); \
545 MONO_ADD_INS (cfg->cbb, ins); \
549 * link_bblock: Links two basic blocks
551 * Links two basic blocks in the control flow graph: the 'from'
552 * argument is the starting block and the 'to' argument is the block
553 * that control flows to after 'from'.
556 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
558 MonoBasicBlock **newa;
562 if (from->cil_code) {
564 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
569 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 printf ("edge from entry to exit\n");
576 for (i = 0; i < from->out_count; ++i) {
577 if (to == from->out_bb [i]) {
583 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
584 for (i = 0; i < from->out_count; ++i) {
585 newa [i] = from->out_bb [i];
593 for (i = 0; i < to->in_count; ++i) {
594 if (from == to->in_bb [i]) {
600 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
601 for (i = 0; i < to->in_count; ++i) {
602 newa [i] = to->in_bb [i];
611 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
613 link_bblock (cfg, from, to);
617 * mono_find_block_region:
619 * We mark each basic block with a region ID. We use that to avoid BB
620 * optimizations when blocks are in different regions.
623 * A region token that encodes where this region is, and information
624 * about the clause owner for this block.
626 * The region encodes the try/catch/filter clause that owns this block
627 * as well as the type. -1 is a special value that represents a block
628 * that is in none of try/catch/filter.
631 mono_find_block_region (MonoCompile *cfg, int offset)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
637 for (i = 0; i < header->num_clauses; ++i) {
638 clause = &header->clauses [i];
639 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
640 (offset < (clause->handler_offset)))
641 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
643 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
644 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
645 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
646 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
647 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
652 for (i = 0; i < header->num_clauses; ++i) {
653 clause = &header->clauses [i];
655 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
656 return ((i + 1) << 8) | clause->flags;
663 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
665 MonoMethodHeader *header = cfg->header;
666 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
673 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
674 if (clause->flags == type)
675 res = g_list_append (res, clause);
/*
 * Get-or-create the stack-pointer variable associated with exception
 * REGION, cached in cfg->spvars keyed by the region id.  (Return-type
 * line and early-return on cache hit are not visible in this chunk.)
 */
682 mono_create_spvar_for_region (MonoCompile *cfg, int region)
686 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
690 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for IL OFFSET. */
698 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
700 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get-or-create the exception object variable for the handler at IL
 * OFFSET, cached in cfg->exvars.  Same shape as the spvar helper above.
 */
704 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
708 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
712 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
713 /* prevent it from being register allocated */
714 var->flags |= MONO_INST_VOLATILE;
716 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
722 * Returns the type used in the eval stack when @type is loaded.
723 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
726 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
730 type = mini_get_underlying_type (type);
731 inst->klass = klass = mono_class_from_mono_type (type);
733 inst->type = STACK_MP;
738 switch (type->type) {
740 inst->type = STACK_INV;
748 inst->type = STACK_I4;
753 case MONO_TYPE_FNPTR:
754 inst->type = STACK_PTR;
756 case MONO_TYPE_CLASS:
757 case MONO_TYPE_STRING:
758 case MONO_TYPE_OBJECT:
759 case MONO_TYPE_SZARRAY:
760 case MONO_TYPE_ARRAY:
761 inst->type = STACK_OBJ;
765 inst->type = STACK_I8;
768 inst->type = cfg->r4_stack_type;
771 inst->type = STACK_R8;
773 case MONO_TYPE_VALUETYPE:
774 if (type->data.klass->enumtype) {
775 type = mono_class_enum_basetype (type->data.klass);
779 inst->type = STACK_VTYPE;
782 case MONO_TYPE_TYPEDBYREF:
783 inst->klass = mono_defaults.typed_reference_class;
784 inst->type = STACK_VTYPE;
786 case MONO_TYPE_GENERICINST:
787 type = &type->data.generic_class->container_class->byval_arg;
791 g_assert (cfg->gshared);
792 if (mini_is_gsharedvt_type (type)) {
793 g_assert (cfg->gsharedvt);
794 inst->type = STACK_VTYPE;
796 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
800 g_error ("unknown type 0x%02x in eval stack type", type->type);
805 * The following tables are used to quickly validate the IL code in type_from_op ().
808 bin_num_table [STACK_MAX] [STACK_MAX] = {
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
814 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
822 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
825 /* reduce the size of this table */
827 bin_int_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
839 bin_comp_table [STACK_MAX] [STACK_MAX] = {
840 /* Inv i L p F & O vt r4 */
842 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
843 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
844 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
845 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
846 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
847 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
848 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
849 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
852 /* reduce the size of this table */
854 shift_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
866 * Tables to map from the non-specific opcode to the matching
867 * type-specific opcode.
869 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
871 binops_op_map [STACK_MAX] = {
872 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
875 /* handles from CEE_NEG to CEE_CONV_U8 */
877 unops_op_map [STACK_MAX] = {
878 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
881 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
883 ovfops_op_map [STACK_MAX] = {
884 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
887 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
889 ovf2ops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
893 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
895 ovf3ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
899 /* handles from CEE_BEQ to CEE_BLT_UN */
901 beqops_op_map [STACK_MAX] = {
902 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
905 /* handles from CEE_CEQ to CEE_CLT_UN */
907 ceqops_op_map [STACK_MAX] = {
908 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
912 * Sets ins->type (the type on the eval stack) according to the
913 * type of the opcode and the arguments to it.
914 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
916 * FIXME: this function sets ins->type unconditionally in some cases, but
917 * it should set it to invalid for some types (a conv.x on an object)
920 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
922 switch (ins->opcode) {
929 /* FIXME: check unverifiable args for STACK_MP */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += binops_op_map [ins->type];
938 ins->type = bin_int_table [src1->type] [src2->type];
939 ins->opcode += binops_op_map [ins->type];
944 ins->type = shift_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
950 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
951 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
952 ins->opcode = OP_LCOMPARE;
953 else if (src1->type == STACK_R4)
954 ins->opcode = OP_RCOMPARE;
955 else if (src1->type == STACK_R8)
956 ins->opcode = OP_FCOMPARE;
958 ins->opcode = OP_ICOMPARE;
960 case OP_ICOMPARE_IMM:
961 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
962 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
963 ins->opcode = OP_LCOMPARE_IMM;
975 ins->opcode += beqops_op_map [src1->type];
978 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
979 ins->opcode += ceqops_op_map [src1->type];
985 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
990 ins->type = neg_table [src1->type];
991 ins->opcode += unops_op_map [ins->type];
994 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
995 ins->type = src1->type;
997 ins->type = STACK_INV;
998 ins->opcode += unops_op_map [ins->type];
1004 ins->type = STACK_I4;
1005 ins->opcode += unops_op_map [src1->type];
1008 ins->type = STACK_R8;
1009 switch (src1->type) {
1012 ins->opcode = OP_ICONV_TO_R_UN;
1015 ins->opcode = OP_LCONV_TO_R_UN;
1019 case CEE_CONV_OVF_I1:
1020 case CEE_CONV_OVF_U1:
1021 case CEE_CONV_OVF_I2:
1022 case CEE_CONV_OVF_U2:
1023 case CEE_CONV_OVF_I4:
1024 case CEE_CONV_OVF_U4:
1025 ins->type = STACK_I4;
1026 ins->opcode += ovf3ops_op_map [src1->type];
1028 case CEE_CONV_OVF_I_UN:
1029 case CEE_CONV_OVF_U_UN:
1030 ins->type = STACK_PTR;
1031 ins->opcode += ovf2ops_op_map [src1->type];
1033 case CEE_CONV_OVF_I1_UN:
1034 case CEE_CONV_OVF_I2_UN:
1035 case CEE_CONV_OVF_I4_UN:
1036 case CEE_CONV_OVF_U1_UN:
1037 case CEE_CONV_OVF_U2_UN:
1038 case CEE_CONV_OVF_U4_UN:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1043 ins->type = STACK_PTR;
1044 switch (src1->type) {
1046 ins->opcode = OP_ICONV_TO_U;
1050 #if SIZEOF_VOID_P == 8
1051 ins->opcode = OP_LCONV_TO_U;
1053 ins->opcode = OP_MOVE;
1057 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_FCONV_TO_U;
1066 ins->type = STACK_I8;
1067 ins->opcode += unops_op_map [src1->type];
1069 case CEE_CONV_OVF_I8:
1070 case CEE_CONV_OVF_U8:
1071 ins->type = STACK_I8;
1072 ins->opcode += ovf3ops_op_map [src1->type];
1074 case CEE_CONV_OVF_U8_UN:
1075 case CEE_CONV_OVF_I8_UN:
1076 ins->type = STACK_I8;
1077 ins->opcode += ovf2ops_op_map [src1->type];
1080 ins->type = cfg->r4_stack_type;
1081 ins->opcode += unops_op_map [src1->type];
1084 ins->type = STACK_R8;
1085 ins->opcode += unops_op_map [src1->type];
1088 ins->type = STACK_R8;
1092 ins->type = STACK_I4;
1093 ins->opcode += ovfops_op_map [src1->type];
1096 case CEE_CONV_OVF_I:
1097 case CEE_CONV_OVF_U:
1098 ins->type = STACK_PTR;
1099 ins->opcode += ovfops_op_map [src1->type];
1102 case CEE_ADD_OVF_UN:
1104 case CEE_MUL_OVF_UN:
1106 case CEE_SUB_OVF_UN:
1107 ins->type = bin_num_table [src1->type] [src2->type];
1108 ins->opcode += ovfops_op_map [src1->type];
1109 if (ins->type == STACK_R8)
1110 ins->type = STACK_INV;
1112 case OP_LOAD_MEMBASE:
1113 ins->type = STACK_PTR;
1115 case OP_LOADI1_MEMBASE:
1116 case OP_LOADU1_MEMBASE:
1117 case OP_LOADI2_MEMBASE:
1118 case OP_LOADU2_MEMBASE:
1119 case OP_LOADI4_MEMBASE:
1120 case OP_LOADU4_MEMBASE:
1121 ins->type = STACK_PTR;
1123 case OP_LOADI8_MEMBASE:
1124 ins->type = STACK_I8;
1126 case OP_LOADR4_MEMBASE:
1127 ins->type = cfg->r4_stack_type;
1129 case OP_LOADR8_MEMBASE:
1130 ins->type = STACK_R8;
1133 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1137 if (ins->type == STACK_MP)
1138 ins->klass = mono_defaults.object_class;
1143 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1149 param_table [STACK_MAX] [STACK_MAX] = {
1154 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1159 switch (args->type) {
1169 for (i = 0; i < sig->param_count; ++i) {
1170 switch (args [i].type) {
1174 if (!sig->params [i]->byref)
1178 if (sig->params [i]->byref)
1180 switch (sig->params [i]->type) {
1181 case MONO_TYPE_CLASS:
1182 case MONO_TYPE_STRING:
1183 case MONO_TYPE_OBJECT:
1184 case MONO_TYPE_SZARRAY:
1185 case MONO_TYPE_ARRAY:
1192 if (sig->params [i]->byref)
1194 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1203 /*if (!param_table [args [i].type] [sig->params [i]->type])
1211 * When we need a pointer to the current domain many times in a method, we
1212 * call mono_domain_get() once and we store the result in a local variable.
1213 * This function returns the variable that represents the MonoDomain*.
1215 inline static MonoInst *
1216 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable on first use. */
1218 if (!cfg->domainvar)
1219 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 return cfg->domainvar;
1224 * The got_var contains the address of the Global Offset Table when AOT
/* Returns NULL unless compiling AOT on a backend that needs a got_var. */
1228 mono_get_got_var (MonoCompile *cfg)
1230 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1232 if (!cfg->got_var) {
1233 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1235 return cfg->got_var;
/*
 * Get-or-create the variable holding the runtime generic context /
 * vtable for a gshared method.  Only valid when cfg->gshared is set.
 */
1239 mono_get_vtable_var (MonoCompile *cfg)
1241 g_assert (cfg->gshared);
1243 if (!cfg->rgctx_var) {
1244 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1245 /* force the var to be stack allocated */
1246 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1249 return cfg->rgctx_var;
/*
 * Map an evaluation-stack type (ins->type, a STACK_* value) back to the
 * MonoType* used to represent it; falls back to ins->klass for managed
 * pointers and value types.  Aborts on stack types it does not handle.
 */
1253 type_from_stack_type (MonoInst *ins) {
1254 switch (ins->type) {
1255 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1256 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1257 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1258 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1259 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: the case label is not visible in this chunk. */
1261 return &ins->klass->this_arg;
1262 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1263 case STACK_VTYPE: return &ins->klass->byval_arg;
1265 g_error ("stack type %d to monotype not handled\n", ins->type);
1270 static G_GNUC_UNUSED int
1271 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1273 t = mono_type_get_underlying_type (t);
1285 case MONO_TYPE_FNPTR:
1287 case MONO_TYPE_CLASS:
1288 case MONO_TYPE_STRING:
1289 case MONO_TYPE_OBJECT:
1290 case MONO_TYPE_SZARRAY:
1291 case MONO_TYPE_ARRAY:
1297 return cfg->r4_stack_type;
1300 case MONO_TYPE_VALUETYPE:
1301 case MONO_TYPE_TYPEDBYREF:
1303 case MONO_TYPE_GENERICINST:
1304 if (mono_type_generic_inst_is_valuetype (t))
1310 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 * Return the element class implied by a CIL array-access opcode
 * (LDELEM_*/STELEM_*). Aborts for opcodes with no fixed element class.
 */
1317 array_access_to_klass (int opcode)
1321 return mono_defaults.byte_class;
1323 return mono_defaults.uint16_class;
1326 return mono_defaults.int_class;
1329 return mono_defaults.sbyte_class;
1332 return mono_defaults.int16_class;
1335 return mono_defaults.int32_class;
1337 return mono_defaults.uint32_class;
1340 return mono_defaults.int64_class;
1343 return mono_defaults.single_class;
1346 return mono_defaults.double_class;
1347 case CEE_LDELEM_REF:
1348 case CEE_STELEM_REF:
1349 return mono_defaults.object_class;
1351 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 * Return a local variable used to carry the stack value SP at stack slot SLOT
 * across basic-block boundaries. Variables are cached per (slot, stack type)
 * in cfg->intvars so the same slot/type pair reuses one variable.
 */
1357 * We try to share variables when possible
1360 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1365 /* inlining can result in deeper stacks */
1366 if (slot >= cfg->header->max_stack)
1367 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the per-method cache: one entry per (stack type, slot) pair */
1369 pos = ins->type - 1 + slot * STACK_MAX;
1371 switch (ins->type) {
1378 if ((vnum = cfg->intvars [pos]))
1379 return cfg->varinfo [vnum];
1380 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 cfg->intvars [pos] = res->inst_c0;
/* Fallback for stack types that are not cached */
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 * Record the (image, token) pair used to load KEY so the AOT compiler can
 * later re-resolve the item from metadata alone.
 */
1390 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1393 * Don't use this if a generic_context is set, since that means AOT can't
1394 * look up the method using just the image+token.
1395 * table == 0 means this is a reference made from a wrapper.
1397 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1398 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1399 jump_info_token->image = image;
1400 jump_info_token->token = token;
1401 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 * Spill the COUNT values in SP (the evaluation stack) into local variables at
 * a basic-block boundary, so the successor block(s) can reload them. Sets
 * cfg->unverifiable when successor blocks disagree on the incoming stack depth.
 */
1406 * This function is called to handle items that are left on the evaluation stack
1407 * at basic block boundaries. What happens is that we save the values to local variables
1408 * and we reload them later when first entering the target basic block (with the
1409 * handle_loaded_temps () function).
1410 * A single joint point will use the same variables (stored in the array bb->out_stack or
1411 * bb->in_stack, if the basic block is before or after the joint point).
1413 * This function needs to be called _before_ emitting the last instruction of
1414 * the bb (i.e. before emitting a branch).
1415 * If the stack merge fails at a join point, cfg->unverifiable is set.
1418 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1421 MonoBasicBlock *bb = cfg->cbb;
1422 MonoBasicBlock *outb;
1423 MonoInst *inst, **locals;
1428 if (cfg->verbose_level > 3)
1429 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit: decide which variables carry this block's outgoing stack */
1430 if (!bb->out_scount) {
1431 bb->out_scount = count;
1432 //printf ("bblock %d has out:", bb->block_num);
/* Prefer to reuse a successor's already-assigned in_stack variables */
1434 for (i = 0; i < bb->out_count; ++i) {
1435 outb = bb->out_bb [i];
1436 /* exception handlers are linked, but they should not be considered for stack args */
1437 if (outb->flags & BB_EXCEPTION_HANDLER)
1439 //printf (" %d", outb->block_num);
1440 if (outb->in_stack) {
1442 bb->out_stack = outb->in_stack;
/* No successor had variables yet: allocate fresh ones */
1448 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1449 for (i = 0; i < count; ++i) {
1451 * try to reuse temps already allocated for this purpouse, if they occupy the same
1452 * stack slot and if they are of the same type.
1453 * This won't cause conflicts since if 'local' is used to
1454 * store one of the values in the in_stack of a bblock, then
1455 * the same variable will be used for the same outgoing stack
1457 * This doesn't work when inlining methods, since the bblocks
1458 * in the inlined methods do not inherit their in_stack from
1459 * the bblock they are inlined to. See bug #58863 for an
1462 if (cfg->inlined_method)
1463 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1465 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to every non-handler successor */
1470 for (i = 0; i < bb->out_count; ++i) {
1471 outb = bb->out_bb [i];
1472 /* exception handlers are linked, but they should not be considered for stack args */
1473 if (outb->flags & BB_EXCEPTION_HANDLER)
1475 if (outb->in_scount) {
1476 if (outb->in_scount != bb->out_scount) {
/* Join point with mismatched stack depths: not verifiable IL */
1477 cfg->unverifiable = TRUE;
1480 continue; /* check they are the same locals */
1482 outb->in_scount = count;
1483 outb->in_stack = bb->out_stack;
1486 locals = bb->out_stack;
/* Emit the actual stores of the stack values into the carrier variables */
1488 for (i = 0; i < count; ++i) {
1489 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1490 inst->cil_code = sp [i]->cil_code;
1491 sp [i] = locals [i];
1492 if (cfg->verbose_level > 3)
1493 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1497 * It is possible that the out bblocks already have in_stack assigned, and
1498 * the in_stacks differ. In this case, we will store to all the different
1505 /* Find a bblock which has a different in_stack */
1507 while (bindex < bb->out_count) {
1508 outb = bb->out_bb [bindex];
1509 /* exception handlers are linked, but they should not be considered for stack args */
1510 if (outb->flags & BB_EXCEPTION_HANDLER) {
1514 if (outb->in_stack != locals) {
/* Duplicate the stores for successors with a different variable set */
1515 for (i = 0; i < count; ++i) {
1516 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1517 inst->cil_code = sp [i]->cil_code;
1518 sp [i] = locals [i];
1519 if (cfg->verbose_level > 3)
1520 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1522 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id set.
 * Two variants exist: a compressed-bitmap path that calls an icall helper,
 * and a direct byte-load-and-mask path.
 */
1532 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1534 int ibitmap_reg = alloc_preg (cfg);
1535 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to the mono_class_interface_match icall */
1537 MonoInst *res, *ins;
1538 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1539 MONO_ADD_INS (cfg->cbb, ins);
1541 if (cfg->compile_aot)
1542 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1544 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1545 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1548 int ibitmap_byte_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1552 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so compute
 * byte offset (iid >> 3) and bit mask (1 << (iid & 7)) in registers */
1553 int iid_reg = alloc_preg (cfg);
1554 int shifted_iid_reg = alloc_preg (cfg);
1555 int ibitmap_byte_address_reg = alloc_preg (cfg);
1556 int masked_iid_reg = alloc_preg (cfg);
1557 int iid_one_bit_reg = alloc_preg (cfg);
1558 int iid_bit_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1564 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, so fold offset and mask */
1568 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_load_intf_bit_reg_class:
 *
 * Bitmap check against a MonoClass: uses MonoClass.interface_bitmap.
 */
1575 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1576 * stored in "klass_reg" implements the interface "klass".
1579 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1581 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *
 * Bitmap check against a MonoVTable: uses MonoVTable.interface_bitmap.
 */
1585 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1586 * stored in "vtable_reg" implements the interface "klass".
1589 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1591 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 *
 * Emit IR that compares KLASS's interface id against MAX_IID_REG. On failure
 * it either branches to FALSE_TARGET (when given) or throws
 * InvalidCastException.
 */
1595 * Emit code which checks whenever the interface id of @klass is smaller than
1596 * than the value given by max_iid_reg.
1599 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1600 MonoBasicBlock *false_target)
1602 if (cfg->compile_aot) {
/* AOT: iid is resolved at load time via a patch-info constant */
1603 int iid_reg = alloc_preg (cfg);
1604 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/*
 * mini_emit_max_iid_check_vtable:
 *
 * Load max_interface_id from the MonoVTable in VTABLE_REG, then delegate
 * the comparison to mini_emit_max_iid_check ().
 */
1615 /* Same as above, but obtains max_iid from a vtable */
1617 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1618 MonoBasicBlock *false_target)
1620 int max_iid_reg = alloc_preg (cfg);
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1623 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_max_iid_check_class:
 *
 * Load max_interface_id from the MonoClass in KLASS_REG, then delegate the
 * comparison to mini_emit_max_iid_check ().
 */
1626 /* Same as above, but obtains max_iid from a klass */
1628 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1629 MonoBasicBlock *false_target)
1631 int max_iid_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1634 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 * Emit an isinst-style subclass check: walk the supertypes table of the class
 * in KLASS_REG at KLASS's inheritance depth and compare the entry against
 * KLASS (given either as KLASS_INS, an AOT class constant, or an immediate).
 * Branches to TRUE_TARGET on match; FALSE_TARGET is taken when the idepth
 * check fails.
 */
1638 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1640 int idepth_reg = alloc_preg (cfg);
1641 int stypes_reg = alloc_preg (cfg);
1642 int stype = alloc_preg (cfg);
1644 mono_class_setup_supertypes (klass);
/* Supertypes tables shorter than MONO_DEFAULT_SUPERTABLE_SIZE always have
 * an entry at klass->idepth - 1, so the depth check can be skipped */
1646 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1655 } else if (cfg->compile_aot) {
1656 int const_reg = alloc_preg (cfg);
1657 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check without an explicit klass instruction */
1666 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface cast check against the MonoVTable in VTABLE_REG:
 * max-iid range check followed by an interface-bitmap bit test. With a
 * TRUE_TARGET this acts as isinst (branch), otherwise as castclass (throw
 * InvalidCastException when the bit is clear).
 */
1672 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int intf_reg = alloc_preg (cfg);
1676 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1677 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1682 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_iface_class_cast:
 *
 * Same as mini_emit_iface_cast () but the check starts from a MonoClass
 * register instead of a MonoVTable register.
 */
1686 * Variant of the above that takes a register to the class, not the vtable.
1689 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1691 int intf_bit_reg = alloc_preg (cfg);
1693 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1694 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1699 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 * Emit an exact class-equality check: compare KLASS_REG against KLASS
 * (given as KLASS_INST, an AOT class constant, or an immediate) and throw
 * InvalidCastException on mismatch.
 */
1703 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1706 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1707 } else if (cfg->compile_aot) {
1708 int const_reg = alloc_preg (cfg);
1709 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass instruction */
1718 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1720 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * instead of throwing; the AOT path materializes the class via a constant.
 */
1724 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1726 if (cfg->compile_aot) {
1727 int const_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1737 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the object in OBJ_REG / class in KLASS_REG
 * against KLASS. Arrays are checked by rank and (recursively) by element
 * class, with special handling for enum element types and for the SZARRAY
 * vs multi-dimensional distinction; non-array types fall through to a
 * supertypes-table walk. Throws InvalidCastException on failure;
 * OBJECT_IS_NULL is the branch target used by the array element checks.
 */
1740 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1743 int rank_reg = alloc_preg (cfg);
1744 int eclass_reg = alloc_preg (cfg);
1746 g_assert (!klass_inst);
/* Array path: rank must match exactly */
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1750 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class checks; enum element types need extra care because the
 * cast_class of an enum array is the enum's underlying type */
1752 if (klass->cast_class == mono_defaults.object_class) {
1753 int parent_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1755 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1756 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1757 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1758 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1759 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1760 } else if (klass->cast_class == mono_defaults.enum_class) {
1761 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1762 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1763 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1765 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1766 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1769 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1770 /* Check that the object is a vector too */
1771 int bounds_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, same scheme as
 * mini_emit_isninst_cast_inst () but throwing instead of branching */
1777 int idepth_reg = alloc_preg (cfg);
1778 int stypes_reg = alloc_preg (cfg);
1779 int stype = alloc_preg (cfg);
1781 mono_class_setup_supertypes (klass);
1783 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1786 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1790 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass instruction */
1795 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1797 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit IR that zeroes SIZE bytes at DESTREG + OFFSET (only VAL == 0 is
 * supported, asserted below). Small well-aligned sizes use a single
 * store-immediate; larger sizes loop with register stores, using the widest
 * store the alignment and backend allow.
 */
1801 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1805 g_assert (val == 0);
/* Fast path: the whole region fits in one aligned store-immediate */
1810 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1816 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1821 #if SIZEOF_REGISTER == 8
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value in a register and store repeatedly */
1829 val_reg = alloc_preg (cfg);
1831 if (SIZEOF_REGISTER == 8)
1832 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1834 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Byte stores for unaligned regions */
1837 /* This could be optimized further if neccesary */
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Widest stores: 8-byte when unaligned access is allowed on 64-bit,
 * then descending widths for the remainder */
1846 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit inline IR that copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, using the widest load/store pairs the alignment and
 * backend allow, then narrowing for the tail.
 */
1877 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1884 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1885 g_assert (size < 10000);
/* Unaligned regions are copied byte by byte */
1888 /* This could be optimized further if neccesary */
1890 cur_reg = alloc_preg (cfg);
1891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte copies when the backend tolerates unaligned access on 64-bit */
1899 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1901 cur_reg = alloc_preg (cfg);
1902 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for the remainder */
1911 cur_reg = alloc_preg (cfg);
1912 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 * Emit IR that stores SREG1 into the TLS slot identified by TLS_KEY.
 * Under AOT the slot offset is not known at compile time, so it is loaded
 * via a TLS-offset constant and OP_TLS_SET_REG; the JIT path embeds the
 * offset directly in an OP_TLS_SET instruction.
 */
1937 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1941 if (cfg->compile_aot) {
1942 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1943 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1945 ins->sreg2 = c->dreg;
1946 MONO_ADD_INS (cfg->cbb, ins);
1948 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1950 ins->inst_offset = mini_get_tls_offset (tls_key);
1951 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 * Emit IR that links this frame's MonoLMF (Last Managed Frame) onto the
 * per-thread LMF stack. Depending on platform capabilities the lmf_addr is
 * obtained from a TLS intrinsic, computed from the jit_tls pointer, inlined
 * via pthread_getspecific (), or fetched through the mono_get_lmf_addr icall.
 */
1958 * Emit IR to push the current LMF onto the LMF stack.
1961 emit_push_lmf (MonoCompile *cfg)
1964 * Emit IR to push the LMF:
1965 * lmf_addr = <lmf_addr from tls>
1966 * lmf->lmf_addr = lmf_addr
1967 * lmf->prev_lmf = *lmf_addr
1970 int lmf_reg, prev_lmf_reg;
1971 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so only previous_lmf needs saving */
1976 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1977 /* Load current lmf */
1978 lmf_ins = mono_get_lmf_intrinsic (cfg);
1980 MONO_ADD_INS (cfg->cbb, lmf_ins);
1981 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1982 lmf_reg = ins->dreg;
1983 /* Save previous_lmf */
1984 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this frame's LMF the current one */
1986 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1989 * Store lmf_addr in a variable, so it can be allocated to a global register.
1991 if (!cfg->lmf_addr_var)
1992 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf, computed from the jit_tls intrinsic */
1995 ins = mono_get_jit_tls_intrinsic (cfg);
1997 int jit_tls_dreg = ins->dreg;
1999 MONO_ADD_INS (cfg->cbb, ins);
2000 lmf_reg = alloc_preg (cfg);
2001 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2003 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2006 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2008 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 MonoInst *args [16], *jit_tls_ins, *ins;
2013 /* Inline mono_get_lmf_addr () */
2014 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2016 /* Load mono_jit_tls_id */
2017 if (cfg->compile_aot)
2018 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2020 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2021 /* call pthread_getspecific () */
2022 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2023 /* lmf_addr = &jit_tls->lmf */
2024 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Last resort: go through the icall */
2027 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2031 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2033 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2034 lmf_reg = ins->dreg;
2036 prev_lmf_reg = alloc_preg (cfg);
2037 /* Save previous_lmf */
2038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2039 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf: publish this frame as the current LMF */
2041 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 *
 * Emit IR that unlinks this frame's MonoLMF from the per-thread LMF stack by
 * restoring previous_lmf, mirroring emit_push_lmf ().
 */
2048 * Emit IR to pop the current LMF from the LMF stack.
2051 emit_pop_lmf (MonoCompile *cfg)
2053 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2059 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2060 lmf_reg = ins->dreg;
/* Fast path mirroring emit_push_lmf (): LMF lives in TLS */
2062 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2063 /* Load previous_lmf */
2064 prev_lmf_reg = alloc_preg (cfg);
2065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2067 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2070 * Emit IR to pop the LMF:
2071 * *(lmf->lmf_addr) = lmf->prev_lmf
2073 /* This could be called before emit_push_lmf () */
2074 if (!cfg->lmf_addr_var)
2075 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2076 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2078 prev_lmf_reg = alloc_preg (cfg);
2079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 * Emit a profiler enter/leave callback (FUNC) taking the current method as
 * its argument, but only when the enter/leave profiler option is active and
 * the method is not being inlined.
 */
2085 emit_instrumentation_call (MonoCompile *cfg, void *func)
2087 MonoInst *iargs [1];
2090 * Avoid instrumenting inlined methods since it can
2091 * distort profiling results.
2093 if (cfg->method != cfg->current_method)
2096 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2097 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2098 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 * Pick the IR call opcode for a call returning TYPE: the CALLI flag selects
 * the *_REG (indirect) variant, VIRT selects *_MEMBASE (vtable) — otherwise
 * the plain direct-call opcode is used. The opcode family (VOID/ I / L / F /
 * R / V) is chosen from the underlying return type.
 */
2103 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2106 type = mini_get_underlying_type (type);
2107 switch (type->type) {
2108 case MONO_TYPE_VOID:
2109 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2116 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2120 case MONO_TYPE_FNPTR:
2121 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2122 case MONO_TYPE_CLASS:
2123 case MONO_TYPE_STRING:
2124 case MONO_TYPE_OBJECT:
2125 case MONO_TYPE_SZARRAY:
2126 case MONO_TYPE_ARRAY:
2127 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2133 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2135 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2137 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2138 case MONO_TYPE_VALUETYPE:
/* Enums are handled via their underlying integral type */
2139 if (type->data.klass->enumtype) {
2140 type = mono_class_enum_basetype (type->data.klass);
2143 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2144 case MONO_TYPE_TYPEDBYREF:
2145 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2146 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type */
2147 type = &type->data.generic_class->container_class->byval_arg;
2150 case MONO_TYPE_MVAR:
2152 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 *
 * Verify that the evaluation-stack item ARG can legally be stored into a
 * location of type TARGET. Returns nonzero when the store is NOT allowed.
 */
2160 * target_type_is_incompatible:
2161 * @cfg: MonoCompile context
2163 * Check that the item @arg on the evaluation stack can be stored
2164 * in the target type (can be a local, or field, etc).
2165 * The cfg arg can be used to check if we need verification or just
2168 * Returns: non-0 value if arg can't be stored on a target.
2171 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2173 MonoType *simple_type;
2176 if (target->byref) {
2177 /* FIXME: check that the pointed to types match */
2178 if (arg->type == STACK_MP)
2179 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2180 if (arg->type == STACK_PTR)
2185 simple_type = mini_get_underlying_type (target);
2186 switch (simple_type->type) {
2187 case MONO_TYPE_VOID:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
/* R4 acceptability depends on whether the backend keeps a separate R4 stack type */
2228 if (arg->type != cfg->r4_stack_type)
2232 if (arg->type != STACK_R8)
2235 case MONO_TYPE_VALUETYPE:
2236 if (arg->type != STACK_VTYPE)
2238 klass = mono_class_from_mono_type (simple_type);
2239 if (klass != arg->klass)
2242 case MONO_TYPE_TYPEDBYREF:
2243 if (arg->type != STACK_VTYPE)
2245 klass = mono_class_from_mono_type (simple_type);
2246 if (klass != arg->klass)
2249 case MONO_TYPE_GENERICINST:
2250 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 /* The second cases is needed when doing partial sharing */
2255 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2259 if (arg->type != STACK_OBJ)
2261 /* FIXME: check type compatibility */
2265 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing */
2266 g_assert (cfg->gshared);
2267 if (mini_type_var_is_vt (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2271 if (arg->type != STACK_OBJ)
2276 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *
 * Verify that the evaluation-stack items in ARGS match the parameter types
 * of SIG well enough to be passed in a call. Returns nonzero on mismatch.
 */
2282 * Prepare arguments for passing to a function call.
2283 * Return a non-zero value if the arguments can't be passed to the given
2285 * The type checks are not yet complete and some conversions may need
2286 * casts on 32 or 64 bit architectures.
2288 * FIXME: implement this using target_type_is_incompatible ()
2291 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2293 MonoType *simple_type;
/* 'this' argument must be an object, managed pointer or native pointer */
2297 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2301 for (i = 0; i < sig->param_count; ++i) {
2302 if (sig->params [i]->byref) {
2303 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2307 simple_type = mini_get_underlying_type (sig->params [i]);
2309 switch (simple_type->type) {
2310 case MONO_TYPE_VOID:
2319 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2325 case MONO_TYPE_FNPTR:
2326 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2329 case MONO_TYPE_CLASS:
2330 case MONO_TYPE_STRING:
2331 case MONO_TYPE_OBJECT:
2332 case MONO_TYPE_SZARRAY:
2333 case MONO_TYPE_ARRAY:
2334 if (args [i]->type != STACK_OBJ)
2339 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != cfg->r4_stack_type)
2347 if (args [i]->type != STACK_R8)
2350 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type */
2351 if (simple_type->data.klass->enumtype) {
2352 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_TYPEDBYREF:
2359 if (args [i]->type != STACK_VTYPE)
2362 case MONO_TYPE_GENERICINST:
/* Re-check using the generic container's open type */
2363 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2366 case MONO_TYPE_MVAR:
2368 if (args [i]->type != STACK_VTYPE)
2372 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map a virtual-call (*_MEMBASE) opcode to its direct-call counterpart.
 * Aborts on opcodes outside the *_MEMBASE family.
 */
2380 callvirt_to_call (int opcode)
2383 case OP_CALL_MEMBASE:
2385 case OP_VOIDCALL_MEMBASE:
2387 case OP_FCALL_MEMBASE:
2389 case OP_RCALL_MEMBASE:
2391 case OP_VCALL_MEMBASE:
2393 case OP_LCALL_MEMBASE:
2396 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 * Map a virtual-call (*_MEMBASE) opcode to its indirect-call (*_REG)
 * counterpart. Aborts on opcodes outside the *_MEMBASE family.
 */
2403 callvirt_to_call_reg (int opcode)
2406 case OP_CALL_MEMBASE:
2408 case OP_VOIDCALL_MEMBASE:
2409 return OP_VOIDCALL_REG;
2410 case OP_FCALL_MEMBASE:
2411 return OP_FCALL_REG;
2412 case OP_RCALL_MEMBASE:
2413 return OP_RCALL_REG;
2414 case OP_VCALL_MEMBASE:
2415 return OP_VCALL_REG;
2416 case OP_LCALL_MEMBASE:
2417 return OP_LCALL_REG;
2419 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 * Emit IR that materializes the IMT argument for an interface call — either
 * a copy of IMT_ARG or a constant for METHOD — and attach it to CALL in the
 * architecture's IMT register. The LLVM path records the register on the
 * call instead of adding an out-arg directly.
 */
2425 /* Either METHOD or IMT_ARG needs to be set */
2427 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2431 if (COMPILE_LLVM (cfg)) {
2432 method_reg = alloc_preg (cfg);
2435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2436 } else if (cfg->compile_aot) {
2437 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2440 MONO_INST_NEW (cfg, ins, OP_PCONST);
2441 ins->inst_p0 = method;
2442 ins->dreg = method_reg;
2443 MONO_ADD_INS (cfg->cbb, ins);
2447 call->imt_arg_reg = method_reg;
2449 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant materialization, passed as a register out-arg */
2453 method_reg = alloc_preg (cfg);
2456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2457 } else if (cfg->compile_aot) {
2458 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2461 MONO_INST_NEW (cfg, ins, OP_PCONST);
2462 ins->inst_p0 = method;
2463 ins->dreg = method_reg;
2464 MONO_ADD_INS (cfg->cbb, ins);
2467 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo patch-info record from MP describing a TARGET of
 * the given patch TYPE at IP.
 */
2470 static MonoJumpInfo *
2471 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2473 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2477 ji->data.target = target;
/* Compute the generic-sharing context usage of KLASS (delegates to metadata) */
2483 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2486 return mono_class_check_context_used (klass);
/* Compute the generic-sharing context usage of METHOD (delegates to metadata) */
2492 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2495 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 *
 * Decide whether a call to CMETHOD must pass the vtable and/or the
 * method-rgctx (mrgctx) as a hidden argument under generic sharing.
 * Results are written to *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX when the
 * out-pointers are non-NULL.
 */
2501 * check_method_sharing:
2503 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2506 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2508 gboolean pass_vtable = FALSE;
2509 gboolean pass_mrgctx = FALSE;
/* Vtable candidates: static methods or valuetype methods of generic classes */
2511 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2512 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2513 gboolean sharable = FALSE;
2515 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2519 * Pass vtable iff target method might
2520 * be shared, which means that sharing
2521 * is enabled for its class and its
2522 * context is sharable (and it's not a
2525 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Methods with their own method type arguments need the mrgctx instead */
2529 if (mini_method_get_context (cmethod) &&
2530 mini_method_get_context (cmethod)->method_inst) {
2531 g_assert (!pass_vtable);
2533 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2536 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2541 if (out_pass_vtable)
2542 *out_pass_vtable = pass_vtable;
2543 if (out_pass_mrgctx)
2544 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for SIG/ARGS, choosing the opcode (tail call,
 *   calli, virtual, ...), setting up the vtype return (vret_addr or an
 *   OP_OUTARG_VTRETADDR temp), doing soft-float r4 argument conversion,
 *   and lowering the argument passing via the LLVM or arch backend.
 * NOTE(review): this excerpt has gaps — e.g. lines 2575 and 2579 both test
 * mini_type_is_vtype(), so a missing enclosing condition (presumably the
 * cfg->vret_addr case) separates them. Consult the full source.
 */
2547 inline static MonoCallInst *
2548 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2549 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2553 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Profiler leave event must fire before the frame disappears on a tail call */
2561 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2565 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2568 call->signature = sig;
2569 call->rgctx_reg = rgctx;
2570 sig_ret = mini_get_underlying_type (sig->ret);
2572 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2575 if (mini_type_is_vtype (sig_ret)) {
2576 call->vret_var = cfg->vret_addr;
2577 //g_assert_not_reached ();
2579 } else if (mini_type_is_vtype (sig_ret)) {
2580 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2583 temp->backend.is_pinvoke = sig->pinvoke;
2586 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2587 * address of return value to increase optimization opportunities.
2588 * Before vtype decomposition, the dreg of the call ins itself represents the
2589 * fact the call modifies the return value. After decomposition, the call will
2590 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2591 * will be transformed into an LDADDR.
2593 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2594 loada->dreg = alloc_preg (cfg);
2595 loada->inst_p0 = temp;
2596 /* We reference the call too since call->dreg could change during optimization */
2597 loada->inst_p1 = call;
2598 MONO_ADD_INS (cfg->cbb, loada);
2600 call->inst.dreg = temp->dreg;
2602 call->vret_var = loada;
2603 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2604 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2606 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2607 if (COMPILE_SOFT_FLOAT (cfg)) {
2609 * If the call has a float argument, we would need to do an r8->r4 conversion using
2610 * an icall, but that cannot be done during the call sequence since it would clobber
2611 * the call registers + the stack. So we do it before emitting the call.
2613 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2615 MonoInst *in = call->args [i];
2617 if (i >= sig->hasthis)
2618 t = sig->params [i - sig->hasthis];
2620 t = &mono_defaults.int_class->byval_arg;
2621 t = mono_type_get_underlying_type (t);
2623 if (!t->byref && t->type == MONO_TYPE_R4) {
2624 MonoInst *iargs [1];
2628 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2630 /* The result will be in an int vreg */
2631 call->args [i] = conv;
2637 call->need_unbox_trampoline = unbox_trampoline;
2640 if (COMPILE_LLVM (cfg))
2641 mono_llvm_emit_call (cfg, call);
2643 mono_arch_emit_call (cfg, call);
2645 mono_arch_emit_call (cfg, call);
2648 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2649 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Bind the runtime-generic-context argument register to CALL via
 *   MONO_ARCH_RGCTX_REG and record the fact on the call/cfg.
 * NOTE(review): braces and any COMPILE_LLVM split are missing from this view.
 */
2655 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2657 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2658 cfg->uses_rgctx_reg = TRUE;
2659 call->rgctx_reg = TRUE;
2661 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR, optionally passing an IMT argument
 *   and an rgctx argument. When check_sp is enabled (pinvoke wrappers with
 *   calling-convention checking), the stack pointer is saved before the call
 *   and compared afterwards, throwing ExecutionEngineException on mismatch.
 * NOTE(review): excerpt is missing lines (e.g. 'check_sp = TRUE', ins->dreg
 * for the post-call OP_GET_SP, several if()/brace lines).
 */
2665 inline static MonoInst*
2666 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2671 gboolean check_sp = FALSE;
2673 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2674 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2676 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value to a fresh vreg so it survives until the call */
2681 rgctx_reg = mono_alloc_preg (cfg);
2682 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2686 if (!cfg->stack_inbalance_var)
2687 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call */
2689 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2690 ins->dreg = cfg->stack_inbalance_var->dreg;
2691 MONO_ADD_INS (cfg->cbb, ins);
2694 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2696 call->inst.sreg1 = addr->dreg;
2699 emit_imt_argument (cfg, call, NULL, imt_arg);
2701 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read SP after the call and compare with the snapshot */
2706 sp_reg = mono_alloc_preg (cfg);
2708 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2710 MONO_ADD_INS (cfg->cbb, ins);
2712 /* Restore the stack so we don't crash when throwing the exception */
2713 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2714 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2717 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
/* Forward declarations for the rgctx/gsharedvt emit helpers defined below */
2728 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2731 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2733 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual/remoting/llvm-only) call to METHOD with ARGS.
 *   Handles: llvm-only interface and virtual dispatch via runtime icalls,
 *   remoting wrappers, string ctor signature fixup, delegate Invoke
 *   devirtualization through delegate->invoke_impl, direct dispatch of
 *   non-virtual/sealed methods, and IMT/vtable slot-based virtual calls.
 * NOTE(review): this excerpt is missing interior lines (braces, locals,
 *   #ifdef halves); only the visible lines are reproduced, with one fix:
 *   the flag clear on original line 2919 used logical '!' instead of
 *   bitwise '~', which zeroed ALL instruction flags rather than clearing
 *   only MONO_INST_HAS_METHOD.
 */
2736 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2737 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2739 #ifndef DISABLE_REMOTING
2740 gboolean might_be_remote = FALSE;
2742 gboolean virtual = this_ins != NULL;
2743 gboolean enable_for_aot = TRUE;
2746 MonoInst *call_target = NULL;
2748 gboolean need_unbox_trampoline;
2751 sig = mono_method_signature (method);
2753 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2754 MonoInst *icall_args [16];
2757 // FIXME: Optimize this
2759 guint32 imt_slot = mono_method_get_imt_slot (method);
2761 icall_args [0] = this_ins;
2762 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2764 icall_args [2] = imt_arg;
2766 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2767 icall_args [2] = ins;
2769 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2771 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2775 rgctx_reg = mono_alloc_preg (cfg);
2776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2779 if (method->string_ctor) {
2780 /* Create the real signature */
2781 /* FIXME: Cache these */
2782 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2783 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2788 context_used = mini_method_check_context_used (cfg, method);
2790 #ifndef DISABLE_REMOTING
2791 might_be_remote = this_ins && sig->hasthis &&
2792 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2793 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2795 if (might_be_remote && context_used) {
2798 g_assert (cfg->gshared);
2800 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2802 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2806 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2807 // FIXME: Vcall optimizations below
2808 MonoInst *icall_args [16];
2811 if (sig->generic_param_count) {
2813 * Generic virtual call, pass the concrete method as the imt argument.
2815 imt_arg = emit_get_rgctx_method (cfg, context_used,
2816 method, MONO_RGCTX_INFO_METHOD);
2819 // FIXME: Optimize this
2821 int slot = mono_method_get_vtable_index (method);
2823 icall_args [0] = this_ins;
2824 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2826 icall_args [2] = imt_arg;
2828 EMIT_NEW_PCONST (cfg, ins, NULL);
2829 icall_args [2] = ins;
2831 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2834 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2836 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2838 #ifndef DISABLE_REMOTING
2839 if (might_be_remote)
2840 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2843 call->method = method;
2844 call->inst.flags |= MONO_INST_HAS_METHOD;
2845 call->inst.inst_left = this_ins;
2846 call->tail_call = tail;
2849 int vtable_reg, slot_reg, this_reg;
2852 this_reg = this_ins->dreg;
2854 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2855 MonoInst *dummy_use;
2857 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2859 /* Make a call to delegate->invoke_impl */
2860 call->inst.inst_basereg = this_reg;
2861 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2862 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2864 /* We must emit a dummy use here because the delegate trampoline will
2865 replace the 'this' argument with the delegate target making this activation
2866 no longer a root for the delegate.
2867 This is an issue for delegates that target collectible code such as dynamic
2868 methods of GC'able assemblies.
2870 For a test case look into #667921.
2872 FIXME: a dummy use is not the best way to do it as the local register allocator
2873 will put it on a caller save register and spil it around the call.
2874 Ideally, we would either put it on a callee save register or only do the store part.
2876 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2878 return (MonoInst*)call;
2881 if ((!cfg->compile_aot || enable_for_aot) &&
2882 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2883 (MONO_METHOD_IS_FINAL (method) &&
2884 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2885 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2887 * the method is not virtual, we just need to ensure this is not null
2888 * and then we can call the method directly.
2890 #ifndef DISABLE_REMOTING
2891 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2893 * The check above ensures method is not gshared, this is needed since
2894 * gshared methods can't have wrappers.
2896 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2900 if (!method->string_ctor)
2901 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2903 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2904 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2906 * the method is virtual, but we can statically dispatch since either
2907 * it's class or the method itself are sealed.
2908 * But first we need to ensure it's not a null reference.
2910 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2912 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2913 } else if (call_target) {
2914 vtable_reg = alloc_preg (cfg);
2915 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2917 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2918 call->inst.sreg1 = call_target->dreg;
/* FIX: use bitwise '~' to clear only MONO_INST_HAS_METHOD; the previous
 * '!MONO_INST_HAS_METHOD' evaluated to 0 and wiped every flag. */
2919 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2921 vtable_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2923 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2924 guint32 imt_slot = mono_method_get_imt_slot (method);
2925 emit_imt_argument (cfg, call, call->method, imt_arg);
2926 slot_reg = vtable_reg;
2927 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->is_virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/*
 * Convenience wrappers:
 *   mono_emit_method_call  — non-tail managed call with the method's own sig.
 *   mono_emit_native_call  — call a raw native address FUNC with SIG.
 *   mono_emit_jit_icall    — call a registered JIT icall through its wrapper.
 * NOTE(review): braces, the fptr assignment in mono_emit_native_call, and
 * the g_assert(info) in mono_emit_jit_icall are missing from this excerpt.
 */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is passed as the "address"; the ABS patch
 * resolver looks it up in cfg->abs_patches at patch time. */
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be invoked directly (no wrapper). Disabled for LLVM
 *   (non-32-bit address limitation), sdb sequence points, or when explicitly
 *   turned off. NOTE(review): return statements/braces missing in excerpt.
 */
3009 direct_icalls_enabled (MonoCompile *cfg)
3011 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3013 if (cfg->compile_llvm)
3016 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to icall INFO, inlining its managed wrapper when the icall
 *   cannot raise and direct icalls are enabled; otherwise call through the
 *   wrapper like mono_emit_jit_icall. NOTE(review): excerpt is missing the
 *   lazy-init locking around wrapper_method creation and the return of the
 *   inlined call's result.
 */
3022 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3025 * Call the jit icall without a wrapper if possible.
3026 * The wrapper is needed for the following reasons:
3027 * - to handle exceptions thrown using mono_raise_exceptions () from the
3028 * icall function. The EH code needs the lmf frame pushed by the
3029 * wrapper to be able to unwind back to managed code.
3030 * - to be able to do stack walks for asynchronously suspended
3031 * threads when debugging.
3033 if (info->no_raise && direct_icalls_enabled (cfg)) {
3037 if (!info->wrapper_method) {
3038 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3039 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3041 mono_memory_barrier ();
3045 * Inline the wrapper method, which is basically a call to the C icall, and
3046 * an exception check.
3048 costs = inline_method (cfg, info->wrapper_method, NULL,
3049 args, NULL, cfg->real_offset, TRUE);
3050 g_assert (costs > 0);
3051 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3055 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   For pinvoke (or LLVM) calls returning small integers, emit an explicit
 *   sign/zero-extension since native code may leave the upper bits of the
 *   return register uninitialized. NOTE(review): the 'widen_op = -1' default,
 *   break statements, and the final 'return ins' are missing from this view.
 */
3060 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3062 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3063 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3067 * Native code might return non register sized integers
3068 * without initializing the upper bits.
3070 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3071 case OP_LOADI1_MEMBASE:
3072 widen_op = OP_ICONV_TO_I1;
3074 case OP_LOADU1_MEMBASE:
3075 widen_op = OP_ICONV_TO_U1;
3077 case OP_LOADI2_MEMBASE:
3078 widen_op = OP_ICONV_TO_I2;
3080 case OP_LOADU2_MEMBASE:
3081 widen_op = OP_ICONV_TO_U2;
3087 if (widen_op != -1) {
3088 int dreg = alloc_preg (cfg);
3091 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3092 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily resolve and cache corlib's String.memcpy(3) helper; aborts with
 *   g_error if the corlib is too old to have it.
 */
3102 get_memcpy_method (void)
3104 static MonoMethod *memcpy_method = NULL;
3105 if (!memcpy_method) {
3106 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3108 g_error ("Old corlib found. Install a new one");
3110 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively set one bit per pointer-sized slot of KLASS (at byte OFFSET)
 *   that holds a managed reference, for use by the value-copy write barrier.
 *   Static fields are skipped; valuetype field offsets are rebased past the
 *   MonoObject header.
 */
3114 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3116 MonoClassField *field;
3117 gpointer iter = NULL;
3119 while ((field = mono_class_get_fields (klass, &iter))) {
3122 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3124 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3125 if (mini_type_is_reference (mono_field_get_type (field))) {
/* references must be pointer-aligned for the bitmap encoding to work */
3126 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3127 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3129 MonoClass *field_class = mono_class_from_mono_type (field->type);
3130 if (field_class->has_references)
3131 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Fast paths, in
 *   preference order: arch OP_CARD_TABLE_WBARRIER; inline card-table mark
 *   (shift, optional mask, store 1 into the card byte); otherwise call the
 *   generic managed write-barrier method. No-op unless gen_write_barriers.
 */
3137 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3139 int card_table_shift_bits;
3140 gpointer card_table_mask;
3142 MonoInst *dummy_use;
3143 int nursery_shift_bits;
3144 size_t nursery_size;
3146 if (!cfg->gen_write_barriers)
3149 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3151 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3153 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3156 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3157 wbarrier->sreg1 = ptr->dreg;
3158 wbarrier->sreg2 = value->dreg;
3159 MONO_ADD_INS (cfg->cbb, wbarrier);
3160 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3161 int offset_reg = alloc_preg (cfg);
3162 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked */
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3166 if (card_table_mask)
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3169 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3170 * IMM's larger than 32bits.
3172 if (cfg->compile_aot) {
3173 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3175 MONO_INST_NEW (cfg, ins, OP_PCONST);
3176 ins->inst_p0 = card_table;
3177 ins->dreg = card_reg;
3178 MONO_ADD_INS (cfg->cbb, ins);
3181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3184 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3185 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3188 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 *   from iargs[1] to iargs[0]. Small copies (<= 5 words) are unrolled with
 *   per-reference barriers; larger ones go through the
 *   mono_gc_wbarrier_value_copy_bitmap icall using a reference bitmap.
 * NOTE(review): bail-out returns (alignment/size limits) and the final
 * return value are missing from this excerpt — presumably FALSE means
 * "caller must fall back"; confirm in full source.
 */
3192 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3194 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3195 unsigned need_wb = 0;
3200 /*types with references can't have alignment smaller than sizeof(void*) */
3201 if (align < SIZEOF_VOID_P)
3204 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3205 if (size > 32 * SIZEOF_VOID_P)
3208 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3210 /* We don't unroll more than 5 stores to avoid code bloat. */
3211 if (size > 5 * SIZEOF_VOID_P) {
3212 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3213 size += (SIZEOF_VOID_P - 1);
3214 size &= ~(SIZEOF_VOID_P - 1);
3216 EMIT_NEW_ICONST (cfg, iargs [2], size);
3217 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3218 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3222 destreg = iargs [0]->dreg;
3223 srcreg = iargs [1]->dreg;
3226 dest_ptr_reg = alloc_preg (cfg);
3227 tmp_reg = alloc_preg (cfg);
/* running destination pointer, advanced word by word */
3230 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3232 while (size >= SIZEOF_VOID_P) {
3233 MonoInst *load_inst;
3234 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3235 load_inst->dreg = tmp_reg;
3236 load_inst->inst_basereg = srcreg;
3237 load_inst->inst_offset = offset;
3238 MONO_ADD_INS (cfg->cbb, load_inst);
3240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* only slots flagged in the bitmap need a barrier */
3243 emit_write_barrier (cfg, iargs [0], load_inst);
3245 offset += SIZEOF_VOID_P;
3246 size -= SIZEOF_VOID_P;
3249 /*tmp += sizeof (void*)*/
3250 if (size >= SIZEOF_VOID_P) {
3251 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3252 MONO_ADD_INS (cfg->cbb, iargs [0]);
3256 /* Those cannot be references since size < sizeof (void*) */
3258 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3282 * Emit code to copy a valuetype of type @klass whose address is stored in
3283 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Strategy: gsharedvt uses rgctx-provided size/memcpy; classes with
 * references (and not native layout) use mono_value_copy so the GC sees the
 * stores; small reference-free copies are inlined; everything else calls
 * the corlib memcpy helper. NOTE(review): excerpt has gaps (iargs setup
 * lines, some returns/braces). */
3286 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3288 MonoInst *iargs [4];
3291 MonoMethod *memcpy_method;
3292 MonoInst *size_ins = NULL;
3293 MonoInst *memcpy_ins = NULL;
3297 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3300 * This check breaks with spilled vars... need to handle it during verification anyway.
3301 * g_assert (klass && klass == src->klass && klass == dest->klass);
3304 if (mini_is_gsharedvt_klass (klass)) {
3306 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3307 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3311 n = mono_class_native_size (klass, &align);
3313 n = mono_class_value_size (klass, &align);
3315 /* if native is true there should be no references in the struct */
3316 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3317 /* Avoid barriers when storing to the stack */
3318 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3319 (dest->opcode == OP_LDADDR))) {
3325 context_used = mini_class_check_context_used (cfg, klass);
3327 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3328 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3330 } else if (context_used) {
3331 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3333 if (cfg->compile_aot) {
3334 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3336 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3337 mono_class_compute_gc_descriptor (klass);
3342 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3344 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline for small sizes, else corlib memcpy */
3349 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3350 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3351 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3356 iargs [2] = size_ins;
3358 EMIT_NEW_ICONST (cfg, iargs [2], n);
3360 memcpy_method = get_memcpy_method ();
3362 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3364 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily resolve and cache corlib's String.memset(3) helper; aborts with
 *   g_error if the corlib is too old to have it.
 */
3369 get_memset_method (void)
3371 static MonoMethod *memset_method = NULL;
3372 if (!memset_method) {
3373 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3375 g_error ("Old corlib found. Install a new one");
3377 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at DEST. Uses the
 *   rgctx-provided bzero for gsharedvt, an inline memset for small sizes,
 *   and the corlib memset helper otherwise.
 * NOTE(review): excerpt is missing iargs[0] = dest setup and some returns.
 */
3381 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3383 MonoInst *iargs [3];
3386 MonoMethod *memset_method;
3387 MonoInst *size_ins = NULL;
3388 MonoInst *bzero_ins = NULL;
3389 static MonoMethod *bzero_method;
3391 /* FIXME: Optimize this for the case when dest is an LDADDR */
3392 mono_class_init (klass);
3393 if (mini_is_gsharedvt_klass (klass)) {
3394 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3395 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3397 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3398 g_assert (bzero_method);
3400 iargs [1] = size_ins;
3401 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3405 n = mono_class_value_size (klass, &align);
3407 if (n <= sizeof (gpointer) * 8) {
3408 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3411 memset_method = get_memset_method ();
3413 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3414 EMIT_NEW_ICONST (cfg, iargs [2], n);
3415 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3422 * Emit IR to return either the this pointer for instance method,
3423 * or the mrgctx for static methods.
/* The rgctx source depends on context: this-object (instance methods of
 * reference types), the mrgctx var (method-level generic context), or the
 * vtable var (static/valuetype methods) — possibly loading the class vtable
 * out of the mrgctx. NOTE(review): several return statements and braces are
 * missing from this excerpt. */
3426 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3428 MonoInst *this_ins = NULL;
3430 g_assert (cfg->gshared);
3432 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3433 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3434 !method->klass->valuetype)
3435 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3437 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3438 MonoInst *mrgctx_loc, *mrgctx_var;
3440 g_assert (!this_ins);
3441 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3443 mrgctx_loc = mono_get_vtable_var (cfg);
3444 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3447 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3448 MonoInst *vtable_loc, *vtable_var;
3450 g_assert (!this_ins);
3452 vtable_loc = mono_get_vtable_var (cfg);
3453 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3455 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3456 MonoInst *mrgctx_var = vtable_var;
/* the vtable var actually holds an mrgctx; load the class vtable from it */
3459 vtable_reg = alloc_preg (cfg);
3460 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3461 vtable_var->type = STACK_PTR;
/* default: load the vtable out of the this object */
3469 vtable_reg = alloc_preg (cfg);
3470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry descriptor from MP, wrapping PATCH_DATA in an
 *   embedded MonoJumpInfo of type PATCH_TYPE. NOTE(review): the return
 *   statement is missing from this excerpt.
 */
3475 static MonoJumpInfoRgctxEntry *
3476 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3478 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3479 res->method = method;
3480 res->in_mrgctx = in_mrgctx;
3481 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3482 res->data->type = patch_type;
3483 res->data->data.target = patch_data;
3484 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Inline the rgctx slot lookup: walk the (m)rgctx array chain for the
 *   entry's depth/index, take the cached value if non-null, otherwise
 *   branch to a slow path calling mono_fill_{method,class}_rgctx. A first
 *   section (AOT/llvm-only?) always calls the fill icall since the slot is
 *   not a compile-time constant.
 * NOTE(review): excerpt is missing lines (args[0] setup, 'break' in the
 *   depth loop, returns, #ifdef/brace structure); treat as read-only.
 */
3489 static inline MonoInst*
3490 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3492 MonoInst *args [16];
3495 // FIXME: No fastpath since the slot is not a compile time constant
3497 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3498 if (entry->in_mrgctx)
3499 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3501 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3505 * FIXME: This can be called during decompose, which is a problem since it creates
3507 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3509 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3511 MonoBasicBlock *is_null_bb, *end_bb;
3512 MonoInst *res, *ins, *call;
3515 slot = mini_get_rgctx_entry_slot (entry);
3517 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3518 index = MONO_RGCTX_SLOT_INDEX (slot);
3520 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find the array depth holding 'index' (arrays grow with depth) */
3521 for (depth = 0; ; ++depth) {
3522 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3524 if (index < size - 1)
3529 NEW_BBLOCK (cfg, end_bb);
3530 NEW_BBLOCK (cfg, is_null_bb);
3533 rgctx_reg = rgctx->dreg;
3535 rgctx_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3538 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3539 NEW_BBLOCK (cfg, is_null_bb);
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3545 for (i = 0; i < depth; ++i) {
3546 int array_reg = alloc_preg (cfg);
3548 /* load ptr to next array */
3549 if (mrgctx && i == 0)
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3553 rgctx_reg = array_reg;
3554 /* is the ptr null? */
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3556 /* if yes, jump to actual trampoline */
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3561 val_reg = alloc_preg (cfg);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3563 /* is the slot null? */
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3565 /* if yes, jump to actual trampoline */
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fast path: slot already filled */
3569 res_reg = alloc_preg (cfg);
3570 MONO_INST_NEW (cfg, ins, OP_MOVE);
3571 ins->dreg = res_reg;
3572 ins->sreg1 = val_reg;
3573 MONO_ADD_INS (cfg->cbb, ins);
3575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: fill the slot via icall */
3578 MONO_START_BB (cfg, is_null_bb);
3580 EMIT_NEW_ICONST (cfg, args [1], index);
3582 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3584 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3585 MONO_INST_NEW (cfg, ins, OP_MOVE);
3586 ins->dreg = res_reg;
3587 ins->sreg1 = call->dreg;
3588 MONO_ADD_INS (cfg->cbb, ins);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3591 MONO_START_BB (cfg, end_bb);
3600 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3603 static inline MonoInst*
3604 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3607 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3609 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3614 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3616 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3617 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3619 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR which loads the rgctx information of type RGCTX_TYPE for the
 * method signature SIG from the runtime generic context
 * (MONO_PATCH_INFO_SIGNATURE entry).
 */
3623 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3624 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3626 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3627 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3629 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR which loads gsharedvt call information for the call described by
 * (SIG, CMETHOD). The (sig, method) pair is packaged into a mempool-allocated
 * MonoJumpInfoGSharedVtCall and fetched through an rgctx entry of kind
 * MONO_PATCH_INFO_GSHAREDVT_CALL.
 */
3633 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3634 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3636 MonoJumpInfoGSharedVtCall *call_info;
3637 MonoJumpInfoRgctxEntry *entry;
/* call_info lives in the cfg mempool, so it stays valid for the whole compile */
3640 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3641 call_info->sig = sig;
3642 call_info->method = cmethod;
3644 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3645 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3647 return emit_rgctx_fetch (cfg, rgctx, entry);
3651 * emit_get_rgctx_virt_method:
3653 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * The (klass, virt_method) pair is packaged into a mempool-allocated
 * MonoJumpInfoVirtMethod and fetched through an rgctx entry of kind
 * MONO_PATCH_INFO_VIRT_METHOD.
 */
3656 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3657 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3659 MonoJumpInfoVirtMethod *info;
3660 MonoJumpInfoRgctxEntry *entry;
3663 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3664 info->klass = klass;
3665 info->method = virt_method;
3667 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3668 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3670 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR which loads the MonoGSharedVtMethodRuntimeInfo for CMETHOD
 * (rgctx info type MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) from the runtime
 * generic context. INFO is the compile-time template describing the entries.
 */
3674 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3675 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3677 MonoJumpInfoRgctxEntry *entry;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 * emit_get_rgctx_method:
3689 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3690 * normal constants, else emit a load from the rgctx.
3693 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3694 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is fully known at JIT time, so emit a constant. */
3696 if (!context_used) {
3699 switch (rgctx_type) {
3700 case MONO_RGCTX_INFO_METHOD:
3701 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3703 case MONO_RGCTX_INFO_METHOD_RGCTX:
3704 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two method-info kinds above can be emitted as constants. */
3707 g_assert_not_reached ();
/* Shared code: fetch the info from the runtime generic context instead. */
3710 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3711 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which loads the rgctx information of type RGCTX_TYPE for FIELD
 * from the runtime generic context (MONO_PATCH_INFO_FIELD entry).
 */
3718 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3719 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3721 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3722 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3724 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot holding (DATA, RGCTX_TYPE) in the method's
 * gsharedvt info template (cfg->gsharedvt_info), registering a new slot if
 * no matching one exists. The entries array grows geometrically (doubling,
 * starting at 16) out of the cfg mempool.
 * NOTE(review): the local is named 'template' — legal C, but a reserved
 * keyword if this file is ever compiled as C++; consider renaming upstream.
 */
3728 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3730 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3731 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot if the same (type, data) pair was already registered.
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries are never shared. */
3736 for (i = 0; i < info->num_entries; ++i) {
3737 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3739 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array when full; old entries are copied over and the old
 * array is abandoned to the mempool (no free needed). */
3743 if (info->num_entries == info->count_entries) {
3744 MonoRuntimeGenericContextInfoTemplate *new_entries;
3745 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3747 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3749 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3750 info->entries = new_entries;
3751 info->count_entries = new_count_entries;
/* Append the new slot and return its index (return is on an elided line). */
3754 idx = info->num_entries;
3755 template = &info->entries [idx];
3756 template->info_type = rgctx_type;
3757 template->data = data;
3759 info->num_entries ++;
3765 * emit_get_gsharedvt_info:
3767 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3770 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3775 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3776 /* Load info->entries [idx] */
3777 dreg = alloc_preg (cfg);
/* The gsharedvt info var points at a MonoGSharedVtMethodRuntimeInfo; the
 * requested slot sits at offset entries + idx * sizeof (gpointer). */
3778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for a class via its byval type. */
3784 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3786 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3790 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / class initialization of KLASS if it has
 * not run yet. The vtable is obtained either from the rgctx (shared code)
 * or as a constant. Two strategies are visible:
 *   - a single OP_GENERIC_CLASS_INIT opcode when the backend supports it
 *     (hides the call, clobbers no regs, keeps basic blocks intact);
 *   - otherwise an inline "initialized" bitfield test on the vtable with a
 *     fallback call to the mono_generic_class_init icall.
 */
3793 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3795 MonoInst *vtable_arg;
3798 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable through the rgctx. */
3801 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3802 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable is known at JIT time. */
3804 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3808 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3811 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3815 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3816 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3818 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3819 ins->sreg1 = vtable_arg->dreg;
3820 MONO_ADD_INS (cfg->cbb, ins);
/* Inline fast path: test the 'initialized' bit of the vtable and skip the
 * icall when it is already set. byte_offset/bitmask are computed once and
 * cached in function-local statics. */
3822 static int byte_offset = -1;
3823 static guint8 bitmask;
3824 int bits_reg, inited_reg;
3825 MonoBasicBlock *inited_bb;
3826 MonoInst *args [16];
3828 if (byte_offset < 0)
3829 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3831 bits_reg = alloc_ireg (cfg);
3832 inited_reg = alloc_ireg (cfg);
3834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3837 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> branch over the icall. */
3839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
/* Slow path: call the runtime to run the class initializer. */
3842 args [0] = vtable_arg;
3843 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3845 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger stop location) at IL offset IP for
 * METHOD. Only emitted when sequence point generation is enabled and METHOD
 * is the outermost method being compiled (not an inlinee). INTR_LOC marks
 * the point as an interruption location; NONEMPTY_STACK flags that the IL
 * evaluation stack is not empty at this point.
 */
3850 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3854 if (cfg->gen_seq_points && cfg->method == method) {
3855 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3857 ins->flags |= MONO_INST_NONEMPTY_STACK;
3858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit IR which records the source and
 * destination classes of a cast into the JIT TLS area
 * (MonoJitTlsData.class_cast_from/class_cast_to) so that a failing cast can
 * produce a detailed exception message. NULL_CHECK guards the recording
 * with an obj != NULL test (null objects cannot fail a cast).
 * No-op unless mini_get_debug_options ()->better_cast_details is set.
 */
3863 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3865 if (mini_get_debug_options ()->better_cast_details) {
3866 int vtable_reg = alloc_preg (cfg);
3867 int klass_reg = alloc_preg (cfg);
3868 MonoBasicBlock *is_null_bb = NULL;
3870 int to_klass_reg, context_used;
3873 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely for a null object. */
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3879 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray '.' after the newline in this message prints a lone
 * period on the next line — looks like a typo in the literal. */
3881 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3885 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3891 context_used = mini_class_check_context_used (cfg, klass);
3893 MonoInst *class_ins;
/* Shared code: the target class must come from the rgctx. */
3895 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3896 to_klass_reg = class_ins->dreg;
3898 to_klass_reg = alloc_preg (cfg);
3899 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3904 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details slots recorded by save_cast_details () once the
 * cast has succeeded. Only the class_cast_from field needs clearing — a
 * NULL 'from' marks the whole record as invalid.
 */
3909 reset_cast_details (MonoCompile *cfg)
3911 /* Reset the variables holding the cast details */
3912 if (mini_get_debug_options ()->better_cast_details) {
3913 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3915 MONO_ADD_INS (cfg->cbb, tls_get);
3916 /* It is enough to reset the from field */
3917 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3922 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that OBJ is exactly an instance of ARRAY_CLASS
 * (used for array covariance checks on stores), throwing
 * ArrayTypeMismatchException otherwise. Three comparison strategies are
 * used depending on compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against the class
 *     (constant or AOT class slot);
 *   - shared generic code: compare the vtable against an rgctx-loaded vtable;
 *   - otherwise: compare the vtable against a known vtable constant.
 */
3925 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3927 int vtable_reg = alloc_preg (cfg);
3930 context_used = mini_class_check_context_used (cfg, array_class);
3932 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3934 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3936 if (cfg->opt & MONO_OPT_SHARED) {
3937 int class_reg = alloc_preg (cfg);
3938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3939 if (cfg->compile_aot) {
3940 int klass_reg = alloc_preg (cfg);
3941 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3946 } else if (context_used) {
3947 MonoInst *vtable_ins;
3949 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3952 if (cfg->compile_aot) {
/* The vtable may fail to load; the elided branch presumably bails out. */
3956 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3958 vt_reg = alloc_preg (cfg);
3959 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3963 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch above throws. */
3969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3971 reset_cast_details (cfg);
3975 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3976 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Unbox VAL into a Nullable<T> by calling the generated
 * Nullable<T>.Unbox (object) method. In shared generic code the method
 * address comes from the rgctx and an indirect call is emitted; otherwise a
 * direct call is emitted, passing the vtable when method sharing requires it.
 */
3979 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3981 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3984 MonoInst *rgctx, *addr;
3986 /* FIXME: What if the class is shared? We might not
3987 have to get the address of the method from the
3989 addr = emit_get_rgctx_method (cfg, context_used, method,
3990 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3992 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3994 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3996 gboolean pass_vtable, pass_mrgctx;
3997 MonoInst *rgctx_arg = NULL;
3999 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4000 g_assert (!pass_mrgctx);
4003 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4006 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4009 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes SP [0] as KLASS: verify the object's dynamic type
 * (rank 0, element class matches) throwing InvalidCastException on mismatch,
 * then compute the address of the boxed payload (obj + sizeof (MonoObject))
 * as a STACK_MP value. CONTEXT_USED selects rgctx-based class comparison
 * for shared generic code.
 */
4014 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4018 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4019 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4020 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4021 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4023 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4024 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4025 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4027 /* FIXME: generics */
4028 g_assert (klass->rank == 0);
/* A boxed value type always has rank 0. */
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-provided element class. */
4038 MonoInst *element_class;
4040 /* This assertion is from the unboxcast insn */
4041 g_assert (klass->rank == 0);
4043 element_class = emit_get_rgctx_klass (cfg, context_used,
4044 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4046 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4047 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: the element class is known, use the inline class check. */
4049 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4050 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4051 reset_cast_details (cfg);
/* Result: pointer to the unboxed value inside the object. */
4054 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4055 MONO_ADD_INS (cfg->cbb, add);
4056 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ as KLASS when KLASS is a gsharedvt type whose concrete kind
 * (value type / reference type / Nullable) is only known at run time.
 * Emits a three-way runtime dispatch on MONO_RGCTX_INFO_CLASS_BOX_TYPE:
 *   1 -> reference type: store the ref into a temporary and take its address;
 *   2 -> Nullable: call the Nullable unbox routine via an rgctx-provided
 *        address with a hand-built signature;
 *   otherwise -> plain vtype: address is obj + sizeof (MonoObject).
 * All paths converge on end_bb leaving the value address in addr_reg, from
 * which the final value load is emitted.
 */
4063 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4065 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4066 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4070 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check: obj must be castable to klass before unboxing. */
4076 args [1] = klass_inst;
4079 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4081 NEW_BBLOCK (cfg, is_ref_bb);
4082 NEW_BBLOCK (cfg, is_nullable_bb);
4083 NEW_BBLOCK (cfg, end_bb);
4084 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4091 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4092 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fall-through path. */
4096 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4097 MONO_ADD_INS (cfg->cbb, addr);
4099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4102 MONO_START_BB (cfg, is_ref_bb);
4104 /* Save the ref to a temporary */
4105 dreg = alloc_ireg (cfg);
4106 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4107 addr->dreg = addr_reg;
4108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4112 MONO_START_BB (cfg, is_nullable_bb);
4115 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4116 MonoInst *unbox_call;
4117 MonoMethodSignature *unbox_sig;
/* Build 'klass Unbox (object)' by hand; the real generic method cannot be
 * constructed at JIT time for gsharedvt. */
4119 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4120 unbox_sig->ret = &klass->byval_arg;
4121 unbox_sig->param_count = 1;
4122 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4123 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4125 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4126 addr->dreg = addr_reg;
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4132 MONO_START_BB (cfg, end_bb);
/* Load the value through the address computed by whichever path ran. */
4135 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4141 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of class KLASS. FOR_BOX indicates
 * the allocation is for boxing a value. Allocation strategy depends on the
 * compilation mode:
 *   - shared generic code (context_used, first branch): class/vtable loaded
 *     from the rgctx; uses the managed allocator when available and the
 *     instance size is known, otherwise an allocation icall
 *     (mono_object_new for MONO_OPT_SHARED, mono_object_new_specific
 *     otherwise);
 *   - MONO_OPT_SHARED: mono_object_new (domain, class);
 *   - AOT out-of-line corlib paths: a specialized mscorlib helper keyed by
 *     type token, avoiding relocations;
 *   - default: known vtable; managed allocator when available (with a
 *     sanity check that instance size >= sizeof (MonoObject)), else the
 *     allocation function chosen by mono_class_get_allocation_ftn, which
 *     may want the size in words as first argument (pass_lw).
 */
4144 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4146 MonoInst *iargs [2];
4152 MonoInst *iargs [2];
4153 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4155 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4157 if (cfg->opt & MONO_OPT_SHARED)
4158 rgctx_info = MONO_RGCTX_INFO_KLASS;
4160 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4161 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4163 if (cfg->opt & MONO_OPT_SHARED) {
4164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4166 alloc_ftn = mono_object_new;
4169 alloc_ftn = mono_object_new_specific;
4172 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4173 if (known_instance_size) {
4174 int size = mono_class_instance_size (klass);
/* A valid object can never be smaller than its header. */
4175 if (size < sizeof (MonoObject))
4176 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4178 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4180 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4183 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4186 if (cfg->opt & MONO_OPT_SHARED) {
4187 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4190 alloc_ftn = mono_object_new;
4191 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4192 /* This happens often in argument checking code, eg. throw new FooException... */
4193 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4194 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4195 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4197 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4198 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> type load error on the cfg, caller bails. */
4202 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4203 cfg->exception_ptr = klass;
4207 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4209 if (managed_alloc) {
4210 int size = mono_class_instance_size (klass);
4211 if (size < sizeof (MonoObject))
4212 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4214 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4215 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4216 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4218 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer-sized words. */
4220 guint32 lw = vtable->klass->instance_size;
4221 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4222 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4223 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4226 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4230 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4234 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of class KLASS, returning the boxed object.
 * Three cases are visible:
 *   - Nullable<T>: call the generated Nullable<T>.Box method, indirectly
 *     through an rgctx-loaded address for shared generic code;
 *   - gsharedvt KLASS: runtime three-way dispatch on
 *     MONO_RGCTX_INFO_CLASS_BOX_TYPE (vtype -> allocate and store payload;
 *     ref -> value already is the object; Nullable -> call a hand-built
 *     Box signature via an rgctx address), converging in end_bb;
 *   - plain value type: allocate via handle_alloc and store VAL at
 *     offset sizeof (MonoObject).
 */
4237 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4239 MonoInst *alloc, *ins;
4241 if (mono_class_is_nullable (klass)) {
4242 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4245 /* FIXME: What if the class is shared? We might not
4246 have to get the method address from the RGCTX. */
4247 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4248 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4249 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4251 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4253 gboolean pass_vtable, pass_mrgctx;
4254 MonoInst *rgctx_arg = NULL;
4256 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4257 g_assert (!pass_mrgctx);
4260 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4263 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4266 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4270 if (mini_is_gsharedvt_klass (klass)) {
4271 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4272 MonoInst *res, *is_ref, *src_var, *addr;
4275 dreg = alloc_ireg (cfg);
4277 NEW_BBLOCK (cfg, is_ref_bb);
4278 NEW_BBLOCK (cfg, is_nullable_bb);
4279 NEW_BBLOCK (cfg, end_bb);
4280 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fall-through: klass is a value type at run time -> allocate and copy. */
4288 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4291 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4292 ins->opcode = OP_STOREV_MEMBASE;
4294 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4295 res->type = STACK_OBJ;
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4300 MONO_START_BB (cfg, is_ref_bb);
4302 /* val is a vtype, so has to load the value manually */
4303 src_var = get_vreg_to_inst (cfg, val->dreg);
4305 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4306 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4311 MONO_START_BB (cfg, is_nullable_bb);
4314 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4315 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4317 MonoMethodSignature *box_sig;
4320 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4321 * construct that method at JIT time, so have to do things by hand.
4323 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4324 box_sig->ret = &mono_defaults.object_class->byval_arg;
4325 box_sig->param_count = 1;
4326 box_sig->params [0] = &klass->byval_arg;
4327 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4328 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4329 res->type = STACK_OBJ;
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4335 MONO_START_BB (cfg, end_bb);
/* Non-gsharedvt value type: straightforward allocate + payload store. */
4339 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4343 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type parameter that is
 * instantiated with a reference type. Such casts need the slow cached cast
 * helpers because variance makes the inline checks insufficient.
 */
4349 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4352 MonoGenericContainer *container;
4353 MonoGenericInst *ginst;
4355 if (klass->generic_class) {
4356 container = klass->generic_class->container_class->generic_container;
4357 ginst = klass->generic_class->context.class_inst;
4358 } else if (klass->generic_container && context_used) {
4359 container = klass->generic_container;
4360 ginst = container->context.class_inst;
4365 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters matter. */
4367 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4369 type = ginst->type_argv [i];
4370 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls never raise;
 * published with a memory barrier, then read without locking. */
4376 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) may be called directly instead of
 * through a wrapper. Requires direct icalls to be enabled for this cfg,
 * and the method to belong to Math or to a whitelisted corlib class.
 */
4379 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4381 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4382 if (!direct_icalls_enabled (cfg))
4386 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4387 * Whitelist a few icalls for now.
4389 if (!direct_icall_type_hash) {
4390 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4392 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4393 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4394 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4395 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Ensure the table contents are visible before the pointer is published. */
4396 mono_memory_barrier ();
4397 direct_icall_type_hash = h;
4400 if (cmethod->klass == mono_defaults.math_class)
4402 /* No locking needed */
4403 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided by a simple
 * vtable/klass comparison: interfaces, arrays, Nullable, MarshalByRef,
 * sealed classes, and open type variables all need the slower cached path.
 * NOTE: function-like macro evaluates its argument multiple times — only
 * pass plain variables. */
4408 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the generated castclass-with-cache wrapper for KLASS.
 * ARGS are the wrapper arguments (obj, class, cache slot). Cast details are
 * recorded around the call for --debug=casts diagnostics.
 */
4411 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4413 MonoMethod *mono_castclass;
4416 mono_castclass = mono_marshal_get_castclass_with_cache ();
4418 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4419 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4420 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh cache index unique to this call site: high 16 bits are
 * the method index, low bits a per-cfg counter incremented on each call.
 */
4426 get_castclass_cache_idx (MonoCompile *cfg)
4428 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4429 cfg->castclass_cache_index ++;
4430 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant: build the (obj, klass, cache) argument triple
 * with constants and call the castclass-with-cache wrapper. Under AOT the
 * cache slot is an AOT constant keyed by a unique call-site index; otherwise
 * it is a zero-initialized pointer allocated from the domain.
 */
4434 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4443 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4446 if (cfg->compile_aot) {
4447 idx = get_castclass_cache_idx (cfg);
4448 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4450 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4453 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4454 return emit_castclass_with_cache (cfg, klass, args)
4458 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: check that SRC is an
 * instance of KLASS, throwing InvalidCastException otherwise. Strategy by
 * case (non-shared first, then shared generic code):
 *   - variant generic argument (non-shared): castclass-with-cache wrapper;
 *   - MarshalByRef or interface (non-shared): inline the marshal castclass
 *     wrapper, charging its cost to *inline_costs;
 *   - complex classes / variance (shared): cached cast via an rgctx cache
 *     slot (obj, cached class, cache);
 *   - otherwise: null-check then inline vtable/klass comparisons —
 *     interface cast, sealed-class exact compare, or the generic
 *     castclass_inst sequence.
 * Cast details are saved/reset around the checks for --debug=casts.
 */
4461 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4463 MonoBasicBlock *is_null_bb;
4464 int obj_reg = src->dreg;
4465 int vtable_reg = alloc_preg (cfg);
4467 MonoInst *klass_inst = NULL, *res;
4469 context_used = mini_class_check_context_used (cfg, klass);
4471 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4472 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4473 (*inline_costs) += 2;
4475 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4476 MonoMethod *mono_castclass;
4477 MonoInst *iargs [1];
4480 mono_castclass = mono_marshal_get_castclass (klass);
4483 save_cast_details (cfg, klass, src->dreg, TRUE);
4484 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4485 iargs, ip, cfg->real_offset, TRUE);
4486 reset_cast_details (cfg);
4487 CHECK_CFG_EXCEPTION;
4488 g_assert (costs > 0);
/* Account for the call as if the 5-byte call instruction were emitted. */
4490 cfg->real_offset += 5;
4492 (*inline_costs) += costs;
/* Shared generic code paths follow. */
4500 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4501 MonoInst *cache_ins;
4503 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4508 /* klass - it's the second element of the cache entry*/
4509 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4512 args [2] = cache_ins;
4514 return emit_castclass_with_cache (cfg, klass, args);
4517 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4520 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes a castclass. */
4522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4525 save_cast_details (cfg, klass, obj_reg, FALSE);
4527 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4529 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4531 int klass_reg = alloc_preg (cfg);
4533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class, non-AOT: a single equality compare suffices (no subclasses). */
4535 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4536 /* the remoting code is broken, access the class for now */
4537 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4538 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4541 cfg->exception_ptr = klass;
4544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4549 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4556 MONO_START_BB (cfg, is_null_bb);
4558 reset_cast_details (cfg);
4567 * Returns NULL and set the cfg exception on error.
4570 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4573 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4574 int obj_reg = src->dreg;
4575 int vtable_reg = alloc_preg (cfg);
4576 int res_reg = alloc_ireg_ref (cfg);
4577 MonoInst *klass_inst = NULL;
4582 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4583 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4584 MonoInst *cache_ins;
4586 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4591 /* klass - it's the second element of the cache entry*/
4592 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4595 args [2] = cache_ins;
4597 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4600 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4603 NEW_BBLOCK (cfg, is_null_bb);
4604 NEW_BBLOCK (cfg, false_bb);
4605 NEW_BBLOCK (cfg, end_bb);
4607 /* Do the assignment at the beginning, so the other assignment can be if converted */
4608 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4609 ins->type = STACK_OBJ;
4612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4613 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4617 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4618 g_assert (!context_used);
4619 /* the is_null_bb target simply copies the input register to the output */
4620 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4622 int klass_reg = alloc_preg (cfg);
4625 int rank_reg = alloc_preg (cfg);
4626 int eclass_reg = alloc_preg (cfg);
4628 g_assert (!context_used);
4629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4634 if (klass->cast_class == mono_defaults.object_class) {
4635 int parent_reg = alloc_preg (cfg);
4636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4637 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4638 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4640 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4641 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4642 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4644 } else if (klass->cast_class == mono_defaults.enum_class) {
4645 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4647 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4648 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4650 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4651 /* Check that the object is a vector too */
4652 int bounds_reg = alloc_preg (cfg);
4653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4658 /* the is_null_bb target simply copies the input register to the output */
4659 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4661 } else if (mono_class_is_nullable (klass)) {
4662 g_assert (!context_used);
4663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4667 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4668 g_assert (!context_used);
4669 /* the remoting code is broken, access the class for now */
4670 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4671 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4673 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4674 cfg->exception_ptr = klass;
4677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4686 /* the is_null_bb target simply copies the input register to the output */
4687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4692 MONO_START_BB (cfg, false_bb);
4694 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4697 MONO_START_BB (cfg, is_null_bb);
4699 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the IR expansion of the "checked isinst" (CISINST) opcode for KLASS
 * against the object in SRC, leaving an integer result code in DREG (see the
 * value table in the comment below).  The transparent-proxy paths are only
 * built when DISABLE_REMOTING is not defined.
 * NOTE(review): this listing elides some lines (#else/#endif, braces), so the
 * conditional structure shown here is incomplete.
 */
4705 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4707 /* This opcode takes as input an object reference and a class, and returns:
4708 0) if the object is an instance of the class,
4709 1) if the object is not instance of the class,
4710 2) if the object is a proxy whose type cannot be determined */
4713 #ifndef DISABLE_REMOTING
4714 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4716 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4718 int obj_reg = src->dreg;
4719 int dreg = alloc_ireg (cfg);
4721 #ifndef DISABLE_REMOTING
4722 int klass_reg = alloc_preg (cfg);
4725 NEW_BBLOCK (cfg, true_bb);
4726 NEW_BBLOCK (cfg, false_bb);
4727 NEW_BBLOCK (cfg, end_bb);
4728 #ifndef DISABLE_REMOTING
4729 NEW_BBLOCK (cfg, false2_bb);
4730 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": branch straight to the block that sets dreg = 1. */
4733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: consult the vtable's interface check first. */
4736 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4737 #ifndef DISABLE_REMOTING
4738 NEW_BBLOCK (cfg, interface_fail_bb);
4741 tmp_reg = alloc_preg (cfg);
4742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4743 #ifndef DISABLE_REMOTING
4744 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4745 MONO_START_BB (cfg, interface_fail_bb);
4746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* The iface check failed: if the object is not a transparent proxy either, the answer is "no" (1). */
4748 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4750 tmp_reg = alloc_preg (cfg);
/* A proxy with custom_type_info set cannot be decided at JIT time -> result 2. */
4751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4755 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: compare concrete classes, handling proxies when remoting is enabled. */
4758 #ifndef DISABLE_REMOTING
4759 tmp_reg = alloc_preg (cfg);
4760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4763 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4764 tmp_reg = alloc_preg (cfg);
/* For proxies, test against the remoted class (MonoRemoteClass.proxy_class), not the proxy class itself. */
4765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4768 tmp_reg = alloc_preg (cfg);
4769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4773 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4776 MONO_START_BB (cfg, no_proxy_bb);
4778 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4780 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: not an instance -> 1 */
4784 MONO_START_BB (cfg, false_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4789 #ifndef DISABLE_REMOTING
/* false2_bb: undecidable proxy -> 2 */
4790 MONO_START_BB (cfg, false2_bb);
4792 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4793 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: is an instance -> 0 */
4796 MONO_START_BB (cfg, true_bb);
4798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4800 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value for the caller. */
4803 MONO_INST_NEW (cfg, ins, OP_ICONST);
4805 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR expansion of the "checked castclass" (CCASTCLASS) opcode: like
 * castclass, but for transparent proxies whose type cannot be decided at JIT
 * time the check is deferred (result 1) instead of throwing.  See the result
 * table in the comment below.
 * NOTE(review): this listing elides some lines (#else/#endif, braces), so the
 * conditional structure shown here is incomplete.
 */
4811 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4813 /* This opcode takes as input an object reference and a class, and returns:
4814 0) if the object is an instance of the class,
4815 1) if the object is a proxy whose type cannot be determined
4816 an InvalidCastException exception is thrown otherwise*/
4819 #ifndef DISABLE_REMOTING
4820 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4822 MonoBasicBlock *ok_result_bb;
4824 int obj_reg = src->dreg;
4825 int dreg = alloc_ireg (cfg);
4826 int tmp_reg = alloc_preg (cfg);
4828 #ifndef DISABLE_REMOTING
4829 int klass_reg = alloc_preg (cfg);
4830 NEW_BBLOCK (cfg, end_bb);
4833 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a later InvalidCastException can name both types. */
4838 save_cast_details (cfg, klass, obj_reg, FALSE);
4840 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4841 #ifndef DISABLE_REMOTING
4842 NEW_BBLOCK (cfg, interface_fail_bb);
4844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4845 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4846 MONO_START_BB (cfg, interface_fail_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* If the iface check failed and this is not a proxy, throw (mini_emit_class_check throws on mismatch). */
4849 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4851 tmp_reg = alloc_preg (cfg);
/* Proxy without custom type info: cannot be this interface -> InvalidCastException. */
4852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4854 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision to the runtime (result 1). */
4856 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4860 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4864 #ifndef DISABLE_REMOTING
4865 NEW_BBLOCK (cfg, no_proxy_bb);
4867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4869 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4871 tmp_reg = alloc_preg (cfg);
/* For proxies, check against the remoted class rather than the proxy class itself. */
4872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4873 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4875 tmp_reg = alloc_preg (cfg);
4876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4880 NEW_BBLOCK (cfg, fail_1_bb);
4882 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: undecidable proxy -> result 1, no exception. */
4884 MONO_START_BB (cfg, fail_1_bb);
4886 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* no_proxy_bb: plain object -> ordinary castclass check (throws on mismatch). */
4889 MONO_START_BB (cfg, no_proxy_bb);
4891 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4893 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> 0 */
4897 MONO_START_BB (cfg, ok_result_bb);
4899 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4901 #ifndef DISABLE_REMOTING
4902 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value for the caller. */
4906 MONO_INST_NEW (cfg, ins, OP_ICONST);
4908 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline expansion of Enum.HasFlag () for enum class KLASS: load the
 * enum value from ENUM_THIS, AND it with ENUM_FLAG, and compare the result
 * back against ENUM_FLAG, i.e. ((value & flag) == flag).  The 32/64-bit
 * opcode variants are selected from the enum's underlying type.
 */
4913 static G_GNUC_UNUSED MonoInst*
4914 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4916 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4917 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Pick 32-bit vs 64-bit register width from the underlying integral type. */
4920 switch (enum_type->type) {
4923 #if SIZEOF_REGISTER == 8
4935 MonoInst *load, *and, *cmp, *ceq;
4936 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4937 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4938 int dest_reg = alloc_ireg (cfg);
/* dest = ((*enum_this & flag) == flag) */
4940 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4941 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4942 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4943 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4945 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4948 load = mono_decompose_opcode (cfg, load);
4949 and = mono_decompose_opcode (cfg, and);
4950 cmp = mono_decompose_opcode (cfg, cmp);
4951 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of KLASS bound to METHOD with receiver
 * TARGET, inlining the work normally done by mono_delegate_ctor ().  VIRTUAL
 * selects a virtual-invoke delegate; CONTEXT_USED is non-zero under generic
 * sharing, in which case the method is resolved through the rgctx.
 */
4959 * Returns NULL and set the cfg exception on error.
4961 static G_GNUC_UNUSED MonoInst*
4962 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4966 gpointer trampoline;
4967 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out if the backend has no virtual-invoke trampoline for this signature. */
4971 if (virtual && !cfg->llvm_only) {
4972 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4975 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4979 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
/* llvm-only: initialize the delegate through a JIT icall instead of inlining. */
4983 if (cfg->llvm_only) {
4984 MonoInst *args [16];
4987 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4988 * the address of a gshared method. So use a JIT icall.
4989 * FIXME: Optimize this.
4993 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4994 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4999 /* Inline the contents of mono_delegate_ctor */
5001 /* Set target field */
5002 /* Optimize away setting of NULL target */
5003 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into the heap needs a GC write barrier. */
5005 if (cfg->gen_write_barriers) {
5006 dreg = alloc_preg (cfg);
5007 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5008 emit_write_barrier (cfg, ptr, target);
5012 /* Set method field */
5013 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5014 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5017 * To avoid looking up the compiled code belonging to the target method
5018 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5019 * store it, and we fill it after the method has been compiled.
5021 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5022 MonoInst *code_slot_ins;
5025 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
5027 domain = mono_domain_get ();
5028 mono_domain_lock (domain);
5029 if (!domain_jit_info (domain)->method_code_hash)
5030 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5031 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5033 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5034 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5036 mono_domain_unlock (domain);
5038 if (cfg->compile_aot)
5039 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5041 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: reference the trampoline via a patch; JIT: create it now as a constant. */
5046 if (cfg->compile_aot) {
5047 MonoDelegateClassMethodPair *del_tramp;
5049 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5050 del_tramp->klass = klass;
5051 del_tramp->method = context_used ? NULL : method;
5052 del_tramp->is_virtual = virtual;
5053 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5056 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5058 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5059 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5062 /* Set invoke_impl field */
5064 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Otherwise tramp_ins points at a MonoDelegateTrampInfo; copy its fields into the delegate. */
5066 dreg = alloc_preg (cfg);
5067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5070 dreg = alloc_preg (cfg);
5071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5072 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5075 dreg = alloc_preg (cfg);
5076 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5079 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of the given RANK, taking the dimension arguments
 * from the evaluation stack SP.  Disables LLVM for the method because the
 * icall uses a vararg calling convention.
 */
5085 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5087 MonoJitICallInfo *info;
5089 /* Need to register the icall so it gets an icall wrapper */
5090 info = mono_get_array_new_va_icall (rank);
5092 cfg->flags |= MONO_CFG_HAS_VARARGS;
5094 /* mono_array_new_va () needs a vararg calling convention */
5095 cfg->disable_llvm = TRUE;
5097 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5102 * handle_constrained_gsharedvt_call:
5104 * Handle constrained calls where the receiver is a gsharedvt type.
5105 * Return the instruction representing the call. Set the cfg exception on failure.
5108 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5109 gboolean *ref_emit_widen)
5111 MonoInst *ins = NULL;
5112 gboolean emit_widen = *ref_emit_widen;
5115 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5116 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5117 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of shapes is supported: <= 1 arg, simple/gsharedvt return, and a
 * callee that is object, an interface, or a non-corlib reference class. */
5119 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5120 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5121 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5122 MonoInst *args [16];
5125 * This case handles calls to
5126 * - object:ToString()/Equals()/GetHashCode(),
5127 * - System.IComparable<T>:CompareTo()
5128 * - System.IEquatable<T>:Equals ()
5129 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, resolved through the rgctx under generic sharing. */
5133 if (mono_method_check_context_used (cmethod))
5134 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5136 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5137 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5139 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5140 if (fsig->hasthis && fsig->param_count) {
5141 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5142 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5143 ins->dreg = alloc_preg (cfg);
5144 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5145 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: pass its address plus boxing info; otherwise pass the value directly. */
5148 if (mini_is_gsharedvt_type (fsig->params [0])) {
5151 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5153 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5154 addr_reg = ins->dreg;
5155 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5157 EMIT_NEW_ICONST (cfg, args [3], 0);
5158 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5161 EMIT_NEW_ICONST (cfg, args [3], 0);
5162 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall does the ref-vs-vtype dispatch at runtime and returns a boxed result. */
5164 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* Unbox the icall's boxed return value back to the signature's return type. */
5167 if (mini_is_gsharedvt_type (fsig->ret)) {
5168 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5169 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to read the unboxed value. */
5173 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5174 MONO_ADD_INS (cfg->cbb, add);
5176 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5177 MONO_ADD_INS (cfg->cbb, ins);
5178 /* ins represents the call result */
5181 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5184 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so
 * cfg->got_var holds the GOT address for the whole method, and add a dummy
 * use in the exit block to keep the variable alive.  No-op if there is no
 * got_var or it was already allocated.
 */
5193 mono_emit_load_got_addr (MonoCompile *cfg)
5195 MonoInst *getaddr, *dummy_use;
5197 if (!cfg->got_var || cfg->got_var_allocated)
5200 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5201 getaddr->cil_code = cfg->header->code;
5202 getaddr->dreg = cfg->got_var->dreg;
5204 /* Add it to the start of the first bblock */
5205 if (cfg->bb_entry->code) {
5206 getaddr->next = cfg->bb_entry->code;
5207 cfg->bb_entry->code = getaddr;
5210 MONO_ADD_INS (cfg->bb_entry, getaddr);
5212 cfg->got_var_allocated = TRUE;
5215 * Add a dummy use to keep the got_var alive, since real uses might
5216 * only be generated by the back ends.
5217 * Add it to end_bblock, so the variable's lifetime covers the whole
5219 * It would be better to make the usage of the got var explicit in all
5220 * cases when the backend needs it (i.e. calls, throw etc.), so this
5221 * wouldn't be needed.
5223 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5224 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit: read once from the MONO_INLINELIMIT environment
 * variable (falling back to INLINE_LENGTH_LIMIT) by mono_method_check_inlining (). */
5227 static int inline_limit;
5228 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Checks inline depth, header flags (noinlining/synchronized),
 * MarshalByRef classes, the size limit, class-initialization constraints and
 * (with soft-float) R4 arguments/returns.
 */
5231 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5233 MonoMethodHeaderSummary header;
5235 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5236 MonoMethodSignature *sig = mono_method_signature (method);
5240 if (cfg->disable_inline)
/* Cap recursion of nested inlining. */
5245 if (cfg->inline_depth > 10)
5248 if (!mono_method_get_header_summary (method, &header))
5251 /*runtime, icall and pinvoke are checked by summary call*/
5252 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5253 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5254 (mono_class_is_marshalbyref (method->klass)) ||
5258 /* also consider num_locals? */
5259 /* Do the size check early to avoid creating vtables */
5260 if (!inline_limit_inited) {
5261 if (g_getenv ("MONO_INLINELIMIT"))
5262 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5264 inline_limit = INLINE_LENGTH_LIMIT;
5265 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
5267 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5271 * if we can initialize the class of the method right away, we do,
5272 * otherwise we don't allow inlining if the class needs initialization,
5273 * since it would mean inserting a call to mono_runtime_class_init()
5274 * inside the inlined code
5276 if (!(cfg->opt & MONO_OPT_SHARED)) {
5277 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5278 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5279 vtable = mono_class_vtable (cfg->domain, method->klass);
5282 if (!cfg->compile_aot)
5283 mono_runtime_class_init (vtable);
5284 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5285 if (cfg->run_cctors && method->klass->has_cctor) {
5286 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5287 if (!method->klass->runtime_info)
5288 /* No vtable created yet */
5290 vtable = mono_class_vtable (cfg->domain, method->klass);
5293 /* This makes so that inline cannot trigger */
5294 /* .cctors: too many apps depend on them */
5295 /* running with a specific order... */
5296 if (! vtable->initialized)
5298 mono_runtime_class_init (vtable);
5300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5301 if (!method->klass->runtime_info)
5302 /* No vtable created yet */
5304 vtable = mono_class_vtable (cfg->domain, method->klass);
5307 if (!vtable->initialized)
5312 * If we're compiling for shared code
5313 * the cctor will need to be run at aot method load time, for example,
5314 * or at the end of the compilation of the inlining method.
5316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods that pass or return R4 values. */
5320 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5321 if (mono_arch_is_soft_float ()) {
5323 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5325 for (i = 0; i < sig->param_count; ++i)
5326 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5331 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires KLASS's class
 * constructor to be triggered first.  JIT compilation can skip the trigger
 * when the vtable is already initialized, for beforefieldinit classes
 * accessed from another method, or when the cctor provably ran already.
 */
5338 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5340 if (!cfg->compile_aot) {
5342 if (vtable->initialized)
5346 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5347 if (cfg->method == method)
5351 if (!mono_class_needs_cctor_run (klass, method))
5354 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5355 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, with an optional bounds check (BCHECK).
 * Uses an LEA on x86/amd64 for power-of-two element sizes, and an rgctx
 * lookup of the element size for gsharedvt variable-size classes.
 */
5362 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5366 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5369 if (mini_is_gsharedvt_variable_klass (klass)) {
5372 mono_class_init (klass);
5373 size = mono_class_array_element_size (klass);
5376 mult_reg = alloc_preg (cfg);
5377 array_reg = arr->dreg;
5378 index_reg = index->dreg;
5380 #if SIZEOF_REGISTER == 8
5381 /* The array reg is 64 bits but the index reg is only 32 */
5382 if (COMPILE_LLVM (cfg)) {
5384 index2_reg = index_reg;
5386 index2_reg = alloc_preg (cfg);
5387 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit registers: an I8 index must be truncated to I4 first. */
5390 if (index->type == STACK_I8) {
5391 index2_reg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5394 index2_reg = index_reg;
5399 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold base + index*size + vector-offset into one LEA. */
5401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5402 if (size == 1 || size == 2 || size == 4 || size == 8) {
5403 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5405 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5406 ins->klass = mono_class_get_element_class (klass);
5407 ins->type = STACK_MP;
5413 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime; fetch it from the rgctx. */
5416 MonoInst *rgctx_ins;
5419 g_assert (cfg->gshared);
5420 context_used = mini_class_check_context_used (cfg, klass);
5421 g_assert (context_used);
5422 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5423 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index*size + offsetof(MonoArray, vector) */
5427 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5428 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5429 ins->klass = mono_class_get_element_class (klass);
5430 ins->type = STACK_MP;
5431 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] of a
 * two-dimensional array ARR with element class KLASS, including lower-bound
 * adjustment and range checks against both MonoArrayBounds entries.
 */
5437 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5439 int bounds_reg = alloc_preg (cfg);
5440 int add_reg = alloc_ireg_mp (cfg);
5441 int mult_reg = alloc_preg (cfg);
5442 int mult2_reg = alloc_preg (cfg);
5443 int low1_reg = alloc_preg (cfg);
5444 int low2_reg = alloc_preg (cfg);
5445 int high1_reg = alloc_preg (cfg);
5446 int high2_reg = alloc_preg (cfg);
5447 int realidx1_reg = alloc_preg (cfg);
5448 int realidx2_reg = alloc_preg (cfg);
5449 int sum_reg = alloc_preg (cfg);
5450 int index1, index2, tmpreg;
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5457 index1 = index_ins1->dreg;
5458 index2 = index_ins2->dreg;
5460 #if SIZEOF_REGISTER == 8
5461 /* The array reg is 64 bits but the index reg is only 32 */
5462 if (COMPILE_LLVM (cfg)) {
5465 tmpreg = alloc_preg (cfg);
5466 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5468 tmpreg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5473 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5477 /* range checking */
5478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5479 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; require realidx1 < length (unsigned). */
5481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5482 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5483 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5485 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5486 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5487 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
5489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5490 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5491 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5493 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5495 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + offsetof(MonoArray, vector) */
5497 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5498 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5501 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5503 ins->type = STACK_MP;
5505 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an array element address for the array Address ()
 * intrinsic CMETHOD.  Rank-1 and (when intrinsics are enabled) rank-2 arrays
 * are expanded inline; other ranks call the marshalled address helper.
 * IS_SET excludes the trailing value argument from the rank computation.
 */
5511 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5515 MonoMethod *addr_method;
5517 MonoClass *eclass = cmethod->klass->element_class;
5519 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5522 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5524 /* emit_ldelema_2 depends on OP_LMUL */
5525 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5526 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5529 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the generated Address () helper for this rank/element size. */
5532 element_size = mono_class_array_element_size (eclass);
5533 addr_method = mono_marshal_get_array_address (rank, element_size);
5534 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5539 static MonoBreakPolicy
5540 always_insert_breakpoint (MonoMethod *method)
5542 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5545 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5548 * mono_set_break_policy:
5549 * policy_callback: the new callback function
5551 * Allow embedders to decide whether to actually obey breakpoint instructions
5552 * (both break IL instructions and Debugger.Break () method calls), for example
5553 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5554 * untrusted or semi-trusted code.
5556 * @policy_callback will be called every time a break point instruction needs to
5557 * be inserted with the method argument being the method that calls Debugger.Break()
5558 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5559 * if it wants the breakpoint to not be effective in the given method.
5560 * #MONO_BREAK_POLICY_ALWAYS is the default.
5563 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5565 if (policy_callback)
5566 break_policy_func = policy_callback;
/* NULL restores the default always-break policy. */
5568 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint should actually be
 * emitted for METHOD.  (Note: the historical misspelling "brekpoint" is kept
 * because callers elsewhere use this name.)
 */
5572 should_insert_brekpoint (MonoMethod *method) {
5573 switch (break_policy_func (method)) {
5574 case MONO_BREAK_POLICY_ALWAYS:
5576 case MONO_BREAK_POLICY_NEVER:
5578 case MONO_BREAK_POLICY_ON_DBG:
5579 g_warning ("mdb no longer supported");
5582 g_warning ("Incorrect value returned from break policy callback");
5587 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline load/store between an array element (args [0][args [1]])
 * and the location in args [2], with element class taken from the third
 * signature parameter.  IS_SET selects store-into-array vs load-from-array;
 * reference stores into the array get a GC write barrier.
 */
5589 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5591 MonoInst *addr, *store, *load;
5592 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5594 /* the bounds check is already done by the callers */
5595 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *addr = *args [2] */
5597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5598 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5599 if (mini_type_is_reference (fsig->params [2]))
5600 emit_write_barrier (cfg, addr, load);
/* get: *args [2] = *addr */
5602 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5603 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5610 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5612 return mini_type_is_reference (&klass->byval_arg);
5616 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5618 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5619 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5620 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5621 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5622 MonoInst *iargs [3];
5625 mono_class_setup_vtable (obj_array);
5626 g_assert (helper->slot);
5628 if (sp [0]->type != STACK_OBJ)
5630 if (sp [2]->type != STACK_OBJ)
5637 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5641 if (mini_is_gsharedvt_variable_klass (klass)) {
5644 // FIXME-VT: OP_ICONST optimization
5645 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5646 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5647 ins->opcode = OP_STOREV_MEMBASE;
5648 } else if (sp [1]->opcode == OP_ICONST) {
5649 int array_reg = sp [0]->dreg;
5650 int index_reg = sp [1]->dreg;
5651 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5654 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5655 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5657 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5659 if (generic_class_is_reference_type (cfg, klass))
5660 emit_write_barrier (cfg, addr, sp [2]);
5667 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5672 eklass = mono_class_from_mono_type (fsig->params [2]);
5674 eklass = mono_class_from_mono_type (fsig->ret);
5677 return emit_array_store (cfg, eklass, args, FALSE);
5679 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5686 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5690 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5692 //Only allow for valuetypes
5693 if (!param_klass->valuetype || !return_klass->valuetype)
5697 if (param_klass->has_references || return_klass->has_references)
5700 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5701 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5702 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5705 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5706 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5709 //And have the same size
5710 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5716 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5718 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5719 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5721 //Valuetypes that are semantically equivalent
5722 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5725 //Arrays of valuetypes that are semantically equivalent
5726 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5733 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5735 #ifdef MONO_ARCH_SIMD_INTRINSICS
5736 MonoInst *ins = NULL;
5738 if (cfg->opt & MONO_OPT_SIMD) {
5739 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5745 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5749 emit_memory_barrier (MonoCompile *cfg, int kind)
5751 MonoInst *ins = NULL;
5752 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5753 MONO_ADD_INS (cfg->cbb, ins);
5754 ins->backend.memory_barrier_kind = kind;
5760 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5762 MonoInst *ins = NULL;
5765 /* The LLVM backend supports these intrinsics */
5766 if (cmethod->klass == mono_defaults.math_class) {
5767 if (strcmp (cmethod->name, "Sin") == 0) {
5769 } else if (strcmp (cmethod->name, "Cos") == 0) {
5771 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5773 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5777 if (opcode && fsig->param_count == 1) {
5778 MONO_INST_NEW (cfg, ins, opcode);
5779 ins->type = STACK_R8;
5780 ins->dreg = mono_alloc_freg (cfg);
5781 ins->sreg1 = args [0]->dreg;
5782 MONO_ADD_INS (cfg->cbb, ins);
5786 if (cfg->opt & MONO_OPT_CMOV) {
5787 if (strcmp (cmethod->name, "Min") == 0) {
5788 if (fsig->params [0]->type == MONO_TYPE_I4)
5790 if (fsig->params [0]->type == MONO_TYPE_U4)
5791 opcode = OP_IMIN_UN;
5792 else if (fsig->params [0]->type == MONO_TYPE_I8)
5794 else if (fsig->params [0]->type == MONO_TYPE_U8)
5795 opcode = OP_LMIN_UN;
5796 } else if (strcmp (cmethod->name, "Max") == 0) {
5797 if (fsig->params [0]->type == MONO_TYPE_I4)
5799 if (fsig->params [0]->type == MONO_TYPE_U4)
5800 opcode = OP_IMAX_UN;
5801 else if (fsig->params [0]->type == MONO_TYPE_I8)
5803 else if (fsig->params [0]->type == MONO_TYPE_U8)
5804 opcode = OP_LMAX_UN;
5808 if (opcode && fsig->param_count == 2) {
5809 MONO_INST_NEW (cfg, ins, opcode);
5810 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5811 ins->dreg = mono_alloc_ireg (cfg);
5812 ins->sreg1 = args [0]->dreg;
5813 ins->sreg2 = args [1]->dreg;
5814 MONO_ADD_INS (cfg->cbb, ins);
5822 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5824 if (cmethod->klass == mono_defaults.array_class) {
5825 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5826 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5827 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5828 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5829 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5830 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Replace calls to well-known methods (String, Object, Array,
 * RuntimeHelpers, Thread, Interlocked, Volatile, Debugger, Environment,
 * Assembly, Math, ObjCRuntime.Selector) with inline IR, falling through to
 * SIMD / native-type / LLVM / arch-specific intrinsics at the end.
 * NOTE(review): this region is an elided paste — lines carry line-number
 * residue and many brace/return lines are missing.  Code is intentionally
 * left byte-identical; only comments were added.
 */
5837 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5839 MonoInst *ins = NULL;
/* Lazily cached System.Runtime.CompilerServices.RuntimeHelpers class */
5841 static MonoClass *runtime_helpers_class = NULL;
5842 if (! runtime_helpers_class)
5843 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5844 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String: get_Chars (bounds-checked char load), get_Length (OP_STRLEN) --- */
5846 if (cmethod->klass == mono_defaults.string_class) {
5847 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5848 int dreg = alloc_ireg (cfg);
5849 int index_reg = alloc_preg (cfg);
5850 int add_reg = alloc_preg (cfg);
5852 #if SIZEOF_REGISTER == 8
5853 /* The array reg is 64 bits but the index reg is only 32 */
5854 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5856 index_reg = args [1]->dreg;
5858 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5860 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5861 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5862 add_reg = ins->dreg;
5863 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* non-x86 path: compute chars + index*2 by hand */
5866 int mult_reg = alloc_preg (cfg);
5867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5868 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5869 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5870 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5872 type_from_op (cfg, ins, NULL, NULL);
5874 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5875 int dreg = alloc_ireg (cfg);
5876 /* Decompose later to allow more optimizations */
5877 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5878 ins->type = STACK_I4;
5879 ins->flags |= MONO_INST_FAULT;
5880 cfg->cbb->has_array_access = TRUE;
5881 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object: GetType, InternalGetHashCode (non-moving GC only), nop .ctor --- */
5886 } else if (cmethod->klass == mono_defaults.object_class) {
5888 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5889 int dreg = alloc_ireg_ref (cfg);
5890 int vt_reg = alloc_preg (cfg);
5891 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5892 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5893 type_from_op (cfg, ins, NULL, NULL);
5896 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5897 int dreg = alloc_ireg (cfg);
5898 int t1 = alloc_ireg (cfg);
/* hash = (address >> 3) * 2654435761 (Knuth multiplicative hash) */
5900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5901 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5902 ins->type = STACK_I4;
5905 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5906 MONO_INST_NEW (cfg, ins, OP_NOP);
5907 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array: Get/SetGenericValueImpl, GetLength/GetLowerBound(0), get_Rank, get_Length --- */
5911 } else if (cmethod->klass == mono_defaults.array_class) {
5912 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5913 return emit_array_generic_access (cfg, fsig, args, FALSE);
5914 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5915 return emit_array_generic_access (cfg, fsig, args, TRUE);
5917 #ifndef MONO_BIG_ARRAYS
5919 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5922 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5923 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5924 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5925 int dreg = alloc_ireg (cfg);
5926 int bounds_reg = alloc_ireg_mp (cfg);
5927 MonoBasicBlock *end_bb, *szarray_bb;
5928 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5930 NEW_BBLOCK (cfg, end_bb);
5931 NEW_BBLOCK (cfg, szarray_bb);
/* branch on whether the array has a bounds descriptor (multi-dim) or not (szarray) */
5933 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5934 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5937 /* Non-szarray case */
5939 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5940 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5943 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5945 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0 */
5948 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5949 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5951 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5952 MONO_START_BB (cfg, end_bb);
5954 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5955 ins->type = STACK_I4;
/* quick reject: remaining Array intrinsics all start with 'g' (get_*) */
5961 if (cmethod->name [0] != 'g')
5964 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5965 int dreg = alloc_ireg (cfg);
5966 int vtable_reg = alloc_preg (cfg);
5967 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5968 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5969 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5970 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5971 type_from_op (cfg, ins, NULL, NULL);
5974 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5975 int dreg = alloc_ireg (cfg);
5977 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5978 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5979 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers.get_OffsetToStringData --- */
5984 } else if (cmethod->klass == runtime_helpers_class) {
5986 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5987 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread: SpinWait_nop, MemoryBarrier, VolatileRead/Write --- */
5991 } else if (cmethod->klass == mono_defaults.thread_class) {
5992 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5993 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5994 MONO_ADD_INS (cfg->cbb, ins);
5996 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5997 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5998 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6000 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6002 if (fsig->params [0]->type == MONO_TYPE_I1)
6003 opcode = OP_LOADI1_MEMBASE;
6004 else if (fsig->params [0]->type == MONO_TYPE_U1)
6005 opcode = OP_LOADU1_MEMBASE;
6006 else if (fsig->params [0]->type == MONO_TYPE_I2)
6007 opcode = OP_LOADI2_MEMBASE;
6008 else if (fsig->params [0]->type == MONO_TYPE_U2)
6009 opcode = OP_LOADU2_MEMBASE;
6010 else if (fsig->params [0]->type == MONO_TYPE_I4)
6011 opcode = OP_LOADI4_MEMBASE;
6012 else if (fsig->params [0]->type == MONO_TYPE_U4)
6013 opcode = OP_LOADU4_MEMBASE;
6014 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6015 opcode = OP_LOADI8_MEMBASE;
6016 else if (fsig->params [0]->type == MONO_TYPE_R4)
6017 opcode = OP_LOADR4_MEMBASE;
6018 else if (fsig->params [0]->type == MONO_TYPE_R8)
6019 opcode = OP_LOADR8_MEMBASE;
6020 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6021 opcode = OP_LOAD_MEMBASE;
6024 MONO_INST_NEW (cfg, ins, opcode);
6025 ins->inst_basereg = args [0]->dreg;
6026 ins->inst_offset = 0;
6027 MONO_ADD_INS (cfg->cbb, ins);
/* pick destination register class / stack type from the parameter type */
6029 switch (fsig->params [0]->type) {
6036 ins->dreg = mono_alloc_ireg (cfg);
6037 ins->type = STACK_I4;
6041 ins->dreg = mono_alloc_lreg (cfg);
6042 ins->type = STACK_I8;
6046 ins->dreg = mono_alloc_ireg (cfg);
6047 #if SIZEOF_REGISTER == 8
6048 ins->type = STACK_I8;
6050 ins->type = STACK_I4;
6055 ins->dreg = mono_alloc_freg (cfg);
6056 ins->type = STACK_R8;
6059 g_assert (mini_type_is_reference (fsig->params [0]));
6060 ins->dreg = mono_alloc_ireg_ref (cfg);
6061 ins->type = STACK_OBJ;
6065 if (opcode == OP_LOADI8_MEMBASE)
6066 ins = mono_decompose_opcode (cfg, ins);
/* acquire barrier after the volatile load */
6068 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6072 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6074 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6076 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6077 opcode = OP_STOREI1_MEMBASE_REG;
6078 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6079 opcode = OP_STOREI2_MEMBASE_REG;
6080 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6081 opcode = OP_STOREI4_MEMBASE_REG;
6082 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6083 opcode = OP_STOREI8_MEMBASE_REG;
6084 else if (fsig->params [0]->type == MONO_TYPE_R4)
6085 opcode = OP_STORER4_MEMBASE_REG;
6086 else if (fsig->params [0]->type == MONO_TYPE_R8)
6087 opcode = OP_STORER8_MEMBASE_REG;
6088 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6089 opcode = OP_STORE_MEMBASE_REG;
/* release barrier before the volatile store */
6092 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6094 MONO_INST_NEW (cfg, ins, opcode);
6095 ins->sreg1 = args [1]->dreg;
6096 ins->inst_destbasereg = args [0]->dreg;
6097 ins->inst_offset = 0;
6098 MONO_ADD_INS (cfg->cbb, ins);
6100 if (opcode == OP_STOREI8_MEMBASE_REG)
6101 ins = mono_decompose_opcode (cfg, ins);
/* --- System.Threading.Interlocked: Read, Increment, Decrement, Add, Exchange, CompareExchange --- */
6106 } else if (cmethod->klass->image == mono_defaults.corlib &&
6107 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6108 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6111 #if SIZEOF_REGISTER == 8
6112 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6113 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6114 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6115 ins->dreg = mono_alloc_preg (cfg);
6116 ins->sreg1 = args [0]->dreg;
6117 ins->type = STACK_I8;
6118 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6119 MONO_ADD_INS (cfg->cbb, ins);
/* fallback: plain 64-bit load bracketed by full barriers */
6123 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6125 /* 64 bit reads are already atomic */
6126 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6127 load_ins->dreg = mono_alloc_preg (cfg);
6128 load_ins->inst_basereg = args [0]->dreg;
6129 load_ins->inst_offset = 0;
6130 load_ins->type = STACK_I8;
6131 MONO_ADD_INS (cfg->cbb, load_ins);
6133 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement expand to an atomic add of +1/-1 */
6140 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6141 MonoInst *ins_iconst;
6144 if (fsig->params [0]->type == MONO_TYPE_I4) {
6145 opcode = OP_ATOMIC_ADD_I4;
6146 cfg->has_atomic_add_i4 = TRUE;
6148 #if SIZEOF_REGISTER == 8
6149 else if (fsig->params [0]->type == MONO_TYPE_I8)
6150 opcode = OP_ATOMIC_ADD_I8;
6153 if (!mono_arch_opcode_supported (opcode))
6155 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6156 ins_iconst->inst_c0 = 1;
6157 ins_iconst->dreg = mono_alloc_ireg (cfg);
6158 MONO_ADD_INS (cfg->cbb, ins_iconst);
6160 MONO_INST_NEW (cfg, ins, opcode);
6161 ins->dreg = mono_alloc_ireg (cfg);
6162 ins->inst_basereg = args [0]->dreg;
6163 ins->inst_offset = 0;
6164 ins->sreg2 = ins_iconst->dreg;
6165 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6166 MONO_ADD_INS (cfg->cbb, ins);
6168 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6169 MonoInst *ins_iconst;
6172 if (fsig->params [0]->type == MONO_TYPE_I4) {
6173 opcode = OP_ATOMIC_ADD_I4;
6174 cfg->has_atomic_add_i4 = TRUE;
6176 #if SIZEOF_REGISTER == 8
6177 else if (fsig->params [0]->type == MONO_TYPE_I8)
6178 opcode = OP_ATOMIC_ADD_I8;
6181 if (!mono_arch_opcode_supported (opcode))
6183 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6184 ins_iconst->inst_c0 = -1;
6185 ins_iconst->dreg = mono_alloc_ireg (cfg);
6186 MONO_ADD_INS (cfg->cbb, ins_iconst);
6188 MONO_INST_NEW (cfg, ins, opcode);
6189 ins->dreg = mono_alloc_ireg (cfg);
6190 ins->inst_basereg = args [0]->dreg;
6191 ins->inst_offset = 0;
6192 ins->sreg2 = ins_iconst->dreg;
6193 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6194 MONO_ADD_INS (cfg->cbb, ins);
6196 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6199 if (fsig->params [0]->type == MONO_TYPE_I4) {
6200 opcode = OP_ATOMIC_ADD_I4;
6201 cfg->has_atomic_add_i4 = TRUE;
6203 #if SIZEOF_REGISTER == 8
6204 else if (fsig->params [0]->type == MONO_TYPE_I8)
6205 opcode = OP_ATOMIC_ADD_I8;
6208 if (!mono_arch_opcode_supported (opcode))
6210 MONO_INST_NEW (cfg, ins, opcode);
6211 ins->dreg = mono_alloc_ireg (cfg);
6212 ins->inst_basereg = args [0]->dreg;
6213 ins->inst_offset = 0;
6214 ins->sreg2 = args [1]->dreg;
6215 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6216 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: floats are moved through integer registers (f2i/i2f) around the atomic op */
6219 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6220 MonoInst *f2i = NULL, *i2f;
6221 guint32 opcode, f2i_opcode, i2f_opcode;
6222 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6223 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6225 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6226 fsig->params [0]->type == MONO_TYPE_R4) {
6227 opcode = OP_ATOMIC_EXCHANGE_I4;
6228 f2i_opcode = OP_MOVE_F_TO_I4;
6229 i2f_opcode = OP_MOVE_I4_TO_F;
6230 cfg->has_atomic_exchange_i4 = TRUE;
6232 #if SIZEOF_REGISTER == 8
6234 fsig->params [0]->type == MONO_TYPE_I8 ||
6235 fsig->params [0]->type == MONO_TYPE_R8 ||
6236 fsig->params [0]->type == MONO_TYPE_I) {
6237 opcode = OP_ATOMIC_EXCHANGE_I8;
6238 f2i_opcode = OP_MOVE_F_TO_I8;
6239 i2f_opcode = OP_MOVE_I8_TO_F;
6242 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6243 opcode = OP_ATOMIC_EXCHANGE_I4;
6244 cfg->has_atomic_exchange_i4 = TRUE;
6250 if (!mono_arch_opcode_supported (opcode))
6254 /* TODO: Decompose these opcodes instead of bailing here. */
6255 if (COMPILE_SOFT_FLOAT (cfg))
6258 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6259 f2i->dreg = mono_alloc_ireg (cfg);
6260 f2i->sreg1 = args [1]->dreg;
6261 if (f2i_opcode == OP_MOVE_F_TO_I4)
6262 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6263 MONO_ADD_INS (cfg->cbb, f2i);
6266 MONO_INST_NEW (cfg, ins, opcode);
6267 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6268 ins->inst_basereg = args [0]->dreg;
6269 ins->inst_offset = 0;
6270 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6271 MONO_ADD_INS (cfg->cbb, ins);
6273 switch (fsig->params [0]->type) {
6275 ins->type = STACK_I4;
6278 ins->type = STACK_I8;
6281 #if SIZEOF_REGISTER == 8
6282 ins->type = STACK_I8;
6284 ins->type = STACK_I4;
6289 ins->type = STACK_R8;
6292 g_assert (mini_type_is_reference (fsig->params [0]));
6293 ins->type = STACK_OBJ;
6298 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6299 i2f->dreg = mono_alloc_freg (cfg);
6300 i2f->sreg1 = ins->dreg;
6301 i2f->type = STACK_R8;
6302 if (i2f_opcode == OP_MOVE_I4_TO_F)
6303 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6304 MONO_ADD_INS (cfg->cbb, i2f);
6309 if (cfg->gen_write_barriers && is_ref)
6310 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (3 args): same float-through-int dance around an atomic CAS */
6312 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6313 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6314 guint32 opcode, f2i_opcode, i2f_opcode;
6315 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6316 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6318 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6319 fsig->params [1]->type == MONO_TYPE_R4) {
6320 opcode = OP_ATOMIC_CAS_I4;
6321 f2i_opcode = OP_MOVE_F_TO_I4;
6322 i2f_opcode = OP_MOVE_I4_TO_F;
6323 cfg->has_atomic_cas_i4 = TRUE;
6325 #if SIZEOF_REGISTER == 8
6327 fsig->params [1]->type == MONO_TYPE_I8 ||
6328 fsig->params [1]->type == MONO_TYPE_R8 ||
6329 fsig->params [1]->type == MONO_TYPE_I) {
6330 opcode = OP_ATOMIC_CAS_I8;
6331 f2i_opcode = OP_MOVE_F_TO_I8;
6332 i2f_opcode = OP_MOVE_I8_TO_F;
6335 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6336 opcode = OP_ATOMIC_CAS_I4;
6337 cfg->has_atomic_cas_i4 = TRUE;
6343 if (!mono_arch_opcode_supported (opcode))
6347 /* TODO: Decompose these opcodes instead of bailing here. */
6348 if (COMPILE_SOFT_FLOAT (cfg))
6351 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6352 f2i_new->dreg = mono_alloc_ireg (cfg);
6353 f2i_new->sreg1 = args [1]->dreg;
6354 if (f2i_opcode == OP_MOVE_F_TO_I4)
6355 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6356 MONO_ADD_INS (cfg->cbb, f2i_new);
6358 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6359 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6360 f2i_cmp->sreg1 = args [2]->dreg;
6361 if (f2i_opcode == OP_MOVE_F_TO_I4)
6362 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6363 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6366 MONO_INST_NEW (cfg, ins, opcode);
6367 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6368 ins->sreg1 = args [0]->dreg;
6369 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6370 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6371 MONO_ADD_INS (cfg->cbb, ins);
6373 switch (fsig->params [1]->type) {
6375 ins->type = STACK_I4;
6378 ins->type = STACK_I8;
6381 #if SIZEOF_REGISTER == 8
6382 ins->type = STACK_I8;
6384 ins->type = STACK_I4;
6388 ins->type = cfg->r4_stack_type;
6391 ins->type = STACK_R8;
6394 g_assert (mini_type_is_reference (fsig->params [1]));
6395 ins->type = STACK_OBJ;
6400 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6401 i2f->dreg = mono_alloc_freg (cfg);
6402 i2f->sreg1 = ins->dreg;
6403 i2f->type = STACK_R8;
6404 if (i2f_opcode == OP_MOVE_I4_TO_F)
6405 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6406 MONO_ADD_INS (cfg->cbb, i2f);
6411 if (cfg->gen_write_barriers && is_ref)
6412 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (4 args, int32): CAS + comparison result stored into the success out-param */
6414 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6415 fsig->params [1]->type == MONO_TYPE_I4) {
6416 MonoInst *cmp, *ceq;
6418 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6421 /* int32 r = CAS (location, value, comparand); */
6422 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6423 ins->dreg = alloc_ireg (cfg);
6424 ins->sreg1 = args [0]->dreg;
6425 ins->sreg2 = args [1]->dreg;
6426 ins->sreg3 = args [2]->dreg;
6427 ins->type = STACK_I4;
6428 MONO_ADD_INS (cfg->cbb, ins);
6430 /* bool result = r == comparand; */
6431 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6432 cmp->sreg1 = ins->dreg;
6433 cmp->sreg2 = args [2]->dreg;
6434 cmp->type = STACK_I4;
6435 MONO_ADD_INS (cfg->cbb, cmp);
6437 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6438 ceq->dreg = alloc_ireg (cfg);
6439 ceq->type = STACK_I4;
6440 MONO_ADD_INS (cfg->cbb, ceq);
6442 /* *success = result; */
6443 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6445 cfg->has_atomic_cas_i4 = TRUE;
6447 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6448 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6452 } else if (cmethod->klass->image == mono_defaults.corlib &&
6453 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6454 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6457 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6459 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6460 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6462 if (fsig->params [0]->type == MONO_TYPE_I1)
6463 opcode = OP_ATOMIC_LOAD_I1;
6464 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6465 opcode = OP_ATOMIC_LOAD_U1;
6466 else if (fsig->params [0]->type == MONO_TYPE_I2)
6467 opcode = OP_ATOMIC_LOAD_I2;
6468 else if (fsig->params [0]->type == MONO_TYPE_U2)
6469 opcode = OP_ATOMIC_LOAD_U2;
6470 else if (fsig->params [0]->type == MONO_TYPE_I4)
6471 opcode = OP_ATOMIC_LOAD_I4;
6472 else if (fsig->params [0]->type == MONO_TYPE_U4)
6473 opcode = OP_ATOMIC_LOAD_U4;
6474 else if (fsig->params [0]->type == MONO_TYPE_R4)
6475 opcode = OP_ATOMIC_LOAD_R4;
6476 else if (fsig->params [0]->type == MONO_TYPE_R8)
6477 opcode = OP_ATOMIC_LOAD_R8;
6478 #if SIZEOF_REGISTER == 8
6479 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6480 opcode = OP_ATOMIC_LOAD_I8;
6481 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6482 opcode = OP_ATOMIC_LOAD_U8;
6484 else if (fsig->params [0]->type == MONO_TYPE_I)
6485 opcode = OP_ATOMIC_LOAD_I4;
6486 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6487 opcode = OP_ATOMIC_LOAD_U4;
6491 if (!mono_arch_opcode_supported (opcode))
6494 MONO_INST_NEW (cfg, ins, opcode);
6495 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6496 ins->sreg1 = args [0]->dreg;
6497 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6498 MONO_ADD_INS (cfg->cbb, ins);
6500 switch (fsig->params [0]->type) {
6501 case MONO_TYPE_BOOLEAN:
6508 ins->type = STACK_I4;
6512 ins->type = STACK_I8;
6516 #if SIZEOF_REGISTER == 8
6517 ins->type = STACK_I8;
6519 ins->type = STACK_I4;
6523 ins->type = cfg->r4_stack_type;
6526 ins->type = STACK_R8;
6529 g_assert (mini_type_is_reference (fsig->params [0]));
6530 ins->type = STACK_OBJ;
6536 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6538 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6540 if (fsig->params [0]->type == MONO_TYPE_I1)
6541 opcode = OP_ATOMIC_STORE_I1;
6542 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6543 opcode = OP_ATOMIC_STORE_U1;
6544 else if (fsig->params [0]->type == MONO_TYPE_I2)
6545 opcode = OP_ATOMIC_STORE_I2;
6546 else if (fsig->params [0]->type == MONO_TYPE_U2)
6547 opcode = OP_ATOMIC_STORE_U2;
6548 else if (fsig->params [0]->type == MONO_TYPE_I4)
6549 opcode = OP_ATOMIC_STORE_I4;
6550 else if (fsig->params [0]->type == MONO_TYPE_U4)
6551 opcode = OP_ATOMIC_STORE_U4;
6552 else if (fsig->params [0]->type == MONO_TYPE_R4)
6553 opcode = OP_ATOMIC_STORE_R4;
6554 else if (fsig->params [0]->type == MONO_TYPE_R8)
6555 opcode = OP_ATOMIC_STORE_R8;
6556 #if SIZEOF_REGISTER == 8
6557 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6558 opcode = OP_ATOMIC_STORE_I8;
6559 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6560 opcode = OP_ATOMIC_STORE_U8;
6562 else if (fsig->params [0]->type == MONO_TYPE_I)
6563 opcode = OP_ATOMIC_STORE_I4;
6564 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6565 opcode = OP_ATOMIC_STORE_U4;
6569 if (!mono_arch_opcode_supported (opcode))
6572 MONO_INST_NEW (cfg, ins, opcode);
6573 ins->dreg = args [0]->dreg;
6574 ins->sreg1 = args [1]->dreg;
6575 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6576 MONO_ADD_INS (cfg->cbb, ins);
6578 if (cfg->gen_write_barriers && is_ref)
6579 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break: honor the installed break policy --- */
6585 } else if (cmethod->klass->image == mono_defaults.corlib &&
6586 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6587 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6588 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6589 if (should_insert_brekpoint (cfg->method)) {
6590 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6592 MONO_INST_NEW (cfg, ins, OP_NOP);
6593 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: compile-time constant --- */
6597 } else if (cmethod->klass->image == mono_defaults.corlib &&
6598 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6599 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6600 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6602 EMIT_NEW_ICONST (cfg, ins, 1);
6604 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Reflection.Assembly.GetExecutingAssembly (llvm_only: no stack walks) --- */
6607 } else if (cmethod->klass->image == mono_defaults.corlib &&
6608 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6609 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6610 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6611 /* No stack walks are current available, so implement this as an intrinsic */
6612 MonoInst *assembly_ins;
6614 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6615 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6618 } else if (cmethod->klass == mono_defaults.math_class) {
6620 * There is general branchless code for Min/Max, but it does not work for
6622 * http://everything2.com/?node_id=1051618
/* --- ObjCRuntime.Selector.GetHandle: fold LDSTR argument into OP_OBJC_GET_SELECTOR --- */
6624 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6625 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6626 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6627 !strcmp (cmethod->klass->name, "Selector")) ||
6628 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6629 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6630 !strcmp (cmethod->klass->name, "Selector"))
6632 if (cfg->backend->have_objc_get_selector &&
6633 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6634 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6637 MonoJumpInfoToken *ji;
6640 cfg->disable_llvm = TRUE;
6642 if (args [0]->opcode == OP_GOT_ENTRY) {
6643 pi = args [0]->inst_p1;
6644 g_assert (pi->opcode == OP_PATCH_INFO);
6645 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6648 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6649 ji = args [0]->inst_p0;
6652 NULLIFY_INS (args [0]);
6655 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6656 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6657 ins->dreg = mono_alloc_ireg (cfg);
6659 ins->inst_p0 = mono_string_to_utf8 (s);
6660 MONO_ADD_INS (cfg->cbb, ins);
/* --- fallthrough: SIMD, native-types, LLVM-only, then arch-specific intrinsics --- */
6665 #ifdef MONO_ARCH_SIMD_INTRINSICS
6666 if (cfg->opt & MONO_OPT_SIMD) {
6667 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6673 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6677 if (COMPILE_LLVM (cfg)) {
6678 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6683 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *   Redirect well-known runtime-internal calls to specialized implementations.
 *   The only visible case is String.InternalAllocateStr, which is replaced by
 *   a call to the GC's managed allocator (only when allocation profiling and
 *   shared-code mode are off). Returns the emitted call instruction when the
 *   call was redirected.
 */
6687 * This entry point could be used later for arbitrary method
6690 inline static MonoInst*
6691 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6692 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6694 if (method->klass == mono_defaults.string_class) {
6695 /* managed string allocation support */
6696 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6697 MonoInst *iargs [2];
6698 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6699 MonoMethod *managed_alloc = NULL;
6701 g_assert (vtable); /* Should not fail since it is System.String */
/* No managed allocator when cross-compiling; NOTE(review): the fallback path is not visible here. */
6702 #ifndef MONO_CROSS_COMPILE
6703 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6707 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6708 iargs [1] = args [0];
6709 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   Copy the argument values in SP into freshly created local variables so an
 *   inlined callee can address them through cfg->args [] like real arguments.
 */
6716 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6718 MonoInst *store, *temp;
6721 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' slot the type comes from the stack entry, not the signature. */
6722 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6725 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6726 * would be different than the MonoInst's used to represent arguments, and
6727 * the ldelema implementation can't deal with that.
6728 * Solution: When ldelema is used on an inline argument, create a var for
6729 * it, emit ldelema on that var, and emit the saving code below in
6730 * inline_method () if needed.
6732 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6733 cfg->args [i] = temp;
6734 /* This uses cfg->args [i] which is set by the preceding line */
6735 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6736 store->cil_code = sp [0]->cil_code;
/* Compile-time switches enabling name-based inlining restrictions (debugging aid). */
6741 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6742 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6744 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Return TRUE only when CALLED_METHOD's full name starts with the prefix in
 *   the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable. The env
 *   value is read once and cached in a function-local static. Used to bisect
 *   inlining-related miscompilations.
 */
6746 check_inline_called_method_name_limit (MonoMethod *called_method)
6749 static const char *limit = NULL;
6751 if (limit == NULL) {
6752 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6754 if (limit_string != NULL)
6755 limit = limit_string;
/* Empty limit means "no restriction" (handled past the visible lines). */
6760 if (limit [0] != '\0') {
6761 char *called_method_name = mono_method_full_name (called_method, TRUE);
6763 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6764 g_free (called_method_name);
6766 //return (strncmp_result <= 0);
6767 return (strncmp_result == 0);
6774 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit, but matches the CALLER's
 *   full name against MONO_INLINE_CALLER_METHOD_NAME_LIMIT instead.
 */
6776 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6779 static const char *limit = NULL;
6781 if (limit == NULL) {
6782 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6783 if (limit_string != NULL) {
6784 limit = limit_string;
6790 if (limit [0] != '\0') {
6791 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6793 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6794 g_free (caller_method_name);
6796 //return (strncmp_result <= 0);
6797 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that sets DREG to the default (zero) value of RTYPE:
 *   NULL for references/pointers, 0 for integer types, 0.0 for R4/R8,
 *   and OP_VZERO for value types (including generic instances and
 *   type variables constrained to valuetypes).
 */
6805 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage backing the float/double zero constants loaded below. */
6807 static double r8_0 = 0.0;
6808 static float r4_0 = 0.0;
6812 rtype = mini_get_underlying_type (rtype);
6816 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6817 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6818 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6819 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6820 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay in single precision and need an R4 constant. */
6821 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6822 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6823 ins->type = STACK_R4;
6824 ins->inst_p0 = (void*)&r4_0;
6826 MONO_ADD_INS (cfg->cbb, ins);
6827 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6828 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6829 ins->type = STACK_R8;
6830 ins->inst_p0 = (void*)&r8_0;
6832 MONO_ADD_INS (cfg->cbb, ins);
6833 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6834 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6835 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6836 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6837 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference and initialized to NULL. */
6839 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Same type dispatch as emit_init_rvar, but emits OP_DUMMY_* placeholder
 *   instructions instead of real stores. These keep the IR well formed
 *   without actually initializing the register; unhandled types fall back to
 *   the real emit_init_rvar.
 */
6844 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6848 rtype = mini_get_underlying_type (rtype);
6852 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6853 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6854 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6855 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6856 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6857 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6858 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6859 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6860 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6861 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6862 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6863 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6864 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6865 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
6867 emit_init_rvar (cfg, dreg, rtype);
6871 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of TYPE. Under soft-float the value is
 *   materialized in a fresh register and then stored to the local; otherwise
 *   the local's own dreg is initialized directly (real or dummy init
 *   depending on the 'init' flag).
 */
6873 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6875 MonoInst *var = cfg->locals [local];
6876 if (COMPILE_SOFT_FLOAT (cfg)) {
6878 int reg = alloc_dreg (cfg, var->type);
6879 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted value (cbb->last_ins) into the local. */
6880 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6883 emit_init_rvar (cfg, var->dreg, type);
6885 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point. Saves the parts of
 *   CFG that mono_method_to_ir () overwrites, compiles the callee's IL into
 *   fresh basic blocks, and either splices those blocks into the caller
 *   (returning the positive inline cost) or discards them on failure.
 *   INLINE_ALWAYS forces acceptance regardless of cost.
 */
6892 * Return the cost of inlining CMETHOD.
6895 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6896 guchar *ip, guint real_offset, gboolean inline_always)
6898 MonoInst *ins, *rvar = NULL;
6899 MonoMethodHeader *cheader;
6900 MonoBasicBlock *ebblock, *sbblock;
6902 MonoMethod *prev_inlined_method;
6903 MonoInst **prev_locals, **prev_args;
6904 MonoType **prev_arg_types;
6905 guint prev_real_offset;
6906 GHashTable *prev_cbb_hash;
6907 MonoBasicBlock **prev_cil_offset_to_bb;
6908 MonoBasicBlock *prev_cbb;
6909 unsigned char* prev_cil_start;
6910 guint32 prev_cil_offset_to_bb_len;
6911 MonoMethod *prev_current_method;
6912 MonoGenericContext *prev_generic_context;
6913 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6915 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based filters used when bisecting inlining bugs. */
6917 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6918 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6921 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6922 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6927 fsig = mono_method_signature (cmethod);
6929 if (cfg->verbose_level > 2)
6930 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6932 if (!cmethod->inline_info) {
6933 cfg->stat_inlineable_methods++;
6934 cmethod->inline_info = 1;
6937 /* allocate local variables */
6938 cheader = mono_method_get_header (cmethod);
6940 if (cheader == NULL || mono_loader_get_last_error ()) {
6941 MonoLoaderError *error = mono_loader_get_last_error ();
6944 mono_metadata_free_mh (cheader);
/* Forced inlines propagate the loader error to the caller's compile. */
6945 if (inline_always && error)
6946 mono_cfg_set_exception (cfg, error->exception_type);
6948 mono_loader_clear_error ();
6952 /*Must verify before creating locals as it can cause the JIT to assert.*/
6953 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6954 mono_metadata_free_mh (cheader);
6958 /* allocate space to store the return value */
6959 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6960 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; restored from prev_locals below. */
6963 prev_locals = cfg->locals;
6964 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6965 for (i = 0; i < cheader->num_locals; ++i)
6966 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6968 /* allocate start and end blocks */
6969 /* This is needed so if the inline is aborted, we can clean up */
6970 NEW_BBLOCK (cfg, sbblock);
6971 sbblock->real_offset = real_offset;
6973 NEW_BBLOCK (cfg, ebblock);
6974 ebblock->block_num = cfg->num_bblocks++;
6975 ebblock->real_offset = real_offset;
/* Save all per-method state that mono_method_to_ir () will clobber. */
6977 prev_args = cfg->args;
6978 prev_arg_types = cfg->arg_types;
6979 prev_inlined_method = cfg->inlined_method;
6980 cfg->inlined_method = cmethod;
6981 cfg->ret_var_set = FALSE;
6982 cfg->inline_depth ++;
6983 prev_real_offset = cfg->real_offset;
6984 prev_cbb_hash = cfg->cbb_hash;
6985 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6986 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6987 prev_cil_start = cfg->cil_start;
6988 prev_cbb = cfg->cbb;
6989 prev_current_method = cfg->current_method;
6990 prev_generic_context = cfg->generic_context;
6991 prev_ret_var_set = cfg->ret_var_set;
6992 prev_disable_inline = cfg->disable_inline;
6994 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively compile the callee's IL into sbblock..ebblock. */
6997 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6999 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state saved above. */
7001 cfg->inlined_method = prev_inlined_method;
7002 cfg->real_offset = prev_real_offset;
7003 cfg->cbb_hash = prev_cbb_hash;
7004 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7005 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7006 cfg->cil_start = prev_cil_start;
7007 cfg->locals = prev_locals;
7008 cfg->args = prev_args;
7009 cfg->arg_types = prev_arg_types;
7010 cfg->current_method = prev_current_method;
7011 cfg->generic_context = prev_generic_context;
7012 cfg->ret_var_set = prev_ret_var_set;
7013 cfg->disable_inline = prev_disable_inline;
7014 cfg->inline_depth --;
/* Accept the inline when it was cheap enough, forced, or marked AggressiveInlining. */
7016 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7017 if (cfg->verbose_level > 2)
7018 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7020 cfg->stat_inlined_methods++;
7022 /* always add some code to avoid block split failures */
7023 MONO_INST_NEW (cfg, ins, OP_NOP);
7024 MONO_ADD_INS (prev_cbb, ins);
7026 prev_cbb->next_bb = sbblock;
7027 link_bblock (cfg, prev_cbb, sbblock);
7030 * Get rid of the begin and end bblocks if possible to aid local
7033 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7035 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7036 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7038 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7039 MonoBasicBlock *prev = ebblock->in_bb [0];
7040 mono_merge_basic_blocks (cfg, prev, ebblock);
7042 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7043 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7044 cfg->cbb = prev_cbb;
7048 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Make sure the return var is defined on every path into the exit block. */
7054 for (i = 0; i < ebblock->in_count; ++i) {
7055 bb = ebblock->in_bb [i];
7057 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7060 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7070 * If the inlined method contains only a throw, then the ret var is not
7071 * set, so set it to a dummy value.
7074 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Load the inlined return value back onto the evaluation stack. */
7076 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7079 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: undo the speculative compile and clear any pending error. */
7082 if (cfg->verbose_level > 2)
7083 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7084 cfg->exception_type = MONO_EXCEPTION_NONE;
7085 mono_loader_clear_error ();
7087 /* This gets rid of the newly added bblocks */
7088 cfg->cbb = prev_cbb;
7090 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7095 * Some of these comments may well be out-of-date.
7096 * Design decisions: we do a single pass over the IL code (and we do bblock
7097 * splitting/merging in the few cases when it's required: a back jump to an IL
7098 * address that was not already seen as bblock starting point).
7099 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7100 * Complex operations are decomposed in simpler ones right away. We need to let the
7101 * arch-specific code peek and poke inside this process somehow (except when the
7102 * optimizations can take advantage of the full semantic info of coarse opcodes).
7103 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7104 * MonoInst->opcode initially is the IL opcode or some simplification of that
7105 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7106 * opcode with value bigger than OP_LAST.
7107 * At this point the IR can be handed over to an interpreter, a dumb code generator
7108 * or to the optimizing code generator that will translate it to SSA form.
7110 * Profiling directed optimizations.
7111 * We may compile by default with few or no optimizations and instrument the code
7112 * or the user may indicate what methods to optimize the most either in a config file
7113 * or through repeated runs where the compiler applies offline the optimizations to
7114 * each method and then decides if it was worth it.
/* Validation helpers for mono_method_to_ir; each bails out through the
 * UNVERIFIED / TYPE_LOAD_ERROR labels when the IL being compiled is invalid. */
7117 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7118 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7119 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7120 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7121 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7122 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7123 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7124 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7126 /* offset from br.s -> br like opcodes */
7127 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when IL address IP still belongs to basic block BB, i.e. no
 *   other bblock starts at that CIL offset.
 */
7130 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7132 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7134 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the IL in [start, end) and create a basic block (via GET_BBLOCK)
 *   at every branch target and at the instruction following each branch.
 *   Also marks bblocks that end in 'throw' as out-of-line (cold) code.
 */
7138 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7140 unsigned char *ip = start;
7141 unsigned char *target;
7144 MonoBasicBlock *bblock;
7145 const MonoOpcode *opcode;
7148 cli_addr = ip - start;
7149 i = mono_opcode_value ((const guint8 **)&ip, end);
7152 opcode = &mono_opcodes [i];
/* Advance ip by the operand size of each opcode class; only branch
 * operands actually create bblocks. */
7153 switch (opcode->argument) {
7154 case MonoInlineNone:
7157 case MonoInlineString:
7158 case MonoInlineType:
7159 case MonoInlineField:
7160 case MonoInlineMethod:
7163 case MonoShortInlineR:
7170 case MonoShortInlineVar:
7171 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement from the next instruction. */
7174 case MonoShortInlineBrTarget:
7175 target = start + cli_addr + 2 + (signed char)ip [1];
7176 GET_BBLOCK (cfg, bblock, target);
7179 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement from the next instruction. */
7181 case MonoInlineBrTarget:
7182 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7183 GET_BBLOCK (cfg, bblock, target);
7186 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, all relative to the end of the instruction. */
7188 case MonoInlineSwitch: {
7189 guint32 n = read32 (ip + 1);
7192 cli_addr += 5 + 4 * n;
7193 target = start + cli_addr;
7194 GET_BBLOCK (cfg, bblock, target);
7196 for (j = 0; j < n; ++j) {
7197 target = start + cli_addr + (gint32)read32 (ip);
7198 GET_BBLOCK (cfg, bblock, target);
7208 g_assert_not_reached ();
7211 if (i == CEE_THROW) {
7212 unsigned char *bb_start = ip - 1;
7214 /* Find the start of the bblock containing the throw */
7216 while ((bb_start >= start) && !bblock) {
7217 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: keep them out of the hot code layout. */
7221 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN in method M to a MonoMethod. For wrappers the method comes
 *   from the wrapper data (inflated with CONTEXT when needed); otherwise it
 *   is loaded from M's image. Open constructed types are permitted in the
 *   result.
 */
7231 static inline MonoMethod *
7232 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7236 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7237 method = mono_method_get_wrapper_data (m, token);
7240 method = mono_class_inflate_generic_method_checked (method, context, &error);
7241 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7244 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but rejects methods whose class is an
 *   open constructed type when the compile is not generic-shared.
 */
7250 static inline MonoMethod *
7251 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7253 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7255 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass, taking it from the wrapper data (and
 *   inflating with CONTEXT) for wrapper methods, or from metadata otherwise.
 *   The class is initialized before being returned.
 */
7261 static inline MonoClass*
7262 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7267 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7268 klass = mono_method_get_wrapper_data (method, token);
7270 klass = mono_class_inflate_generic_class (klass, context);
7272 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7273 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7276 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature (wrapper data or metadata parse),
 *   inflating with CONTEXT when present.
 */
7280 static inline MonoMethodSignature*
7281 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7283 MonoMethodSignature *fsig;
7285 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7286 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7288 fsig = mono_metadata_parse_signature (method->klass->image, token);
7292 fsig = mono_inflate_generic_signature(fsig, context, &error);
7294 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *   Return (caching in a static) the SecurityManager.ThrowException method
 *   used to raise security exceptions from JITted code.
 */
7300 throw_exception (void)
7302 static MonoMethod *method = NULL;
7305 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7306 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point.
 */
7313 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7315 MonoMethod *thrower = throw_exception ();
7318 EMIT_NEW_PCONST (cfg, args [0], ex);
7319 mono_emit_method_call (cfg, thrower, args, NULL);
7323 * Return the original method if a wrapper is specified. We can only access
7324 * the custom attributes from the original method.
7327 get_original_method (MonoMethod *method)
7329 if (method->wrapper_type == MONO_WRAPPER_NONE)
7332 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7333 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7336 /* in other cases we need to find the original method */
7337 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: emit a throw when CALLER may not access FIELD.
 */
7341 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7343 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7344 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7346 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: emit a throw when CALLER may not call CALLEE.
 */
7350 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7352 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7353 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7355 emit_throw_exception (cfg, ex);
7359 * Check that the IL instructions at ip are the array initialization
7360 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 *   sequence following a newarr and, when it matches, return a pointer to
 *   the static field data (or, for AOT, the RVA as a tagged pointer) so the
 *   array initialization can be done directly.
 */
7363 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7366 * newarr[System.Int32]
7368 * ldtoken field valuetype ...
7369 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7371 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7373 guint32 token = read32 (ip + 7);
7374 guint32 field_token = read32 (ip + 2);
7375 guint32 field_index = field_token & 0xffffff;
7377 const char *data_ptr;
7379 MonoMethod *cmethod;
7380 MonoClass *dummy_class;
7381 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7385 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7389 *out_field_token = field_token;
7391 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies. */
7394 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7396 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7397 case MONO_TYPE_BOOLEAN:
7401 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7402 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7403 case MONO_TYPE_CHAR:
/* Reject if the array needs more bytes than the field provides. */
7420 if (size > mono_type_size (field->type, &dummy_align))
7423 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7424 if (!image_is_dynamic (method->klass->image)) {
7425 field_index = read32 (ip + 2) & 0xffffff;
7426 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7427 data_ptr = mono_image_rva_map (method->klass->image, rva);
7428 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7429 /* for aot code we do the lookup on load */
7430 if (aot && data_ptr)
7431 return GUINT_TO_POINTER (rva);
7433 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7435 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message naming the
 *   offending method and disassembling the bad instruction at IP.
 */
7443 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7445 char *method_fname = mono_method_full_name (method, TRUE);
7447 MonoMethodHeader *header = mono_method_get_header (method);
7449 if (header->code_size == 0)
7450 method_code = g_strdup ("method body is empty.");
7452 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7453 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7454 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7455 g_free (method_fname);
7456 g_free (method_code);
/* Header is freed later, together with the rest of the compile. */
7457 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on CFG, GC-rooting it so it survives
 *   until the compile is torn down.
 */
7461 set_exception_object (MonoCompile *cfg, MonoException *exception)
7463 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7464 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7465 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value *SP into local N, collapsing a
 *   constant-load + reg-reg move into a direct store into the local's dreg
 *   when safe.
 */
7469 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7472 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7473 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7474 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7475 /* Optimize reg-reg moves away */
7477 * Can't optimize other opcodes, since sp[0] might point to
7478 * the last ins of a decomposed opcode.
7480 sp [0]->dreg = (cfg)->locals [n]->dreg;
7482 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7487 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Recognize the 'ldloca <n>; initobj <type>' pattern and replace it with a
 *   direct initialization of the local, avoiding the address-taken local.
 *   Returns the new ip past the consumed instructions when matched.
 */
7490 static inline unsigned char *
7491 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7501 local = read16 (ip + 2);
7505 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7506 /* From the INITOBJ case */
7507 token = read32 (ip + 2);
7508 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7509 CHECK_TYPELOAD (klass);
7510 type = mini_get_underlying_type (&klass->byval_arg);
7511 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk KLASS's parent chain checking whether it derives from
 *   System.Exception.
 */
7519 is_exception_class (MonoClass *klass)
7522 if (klass == mono_defaults.exception_class)
7524 klass = klass->parent;
7530 * is_jit_optimizer_disabled:
7532 * Determine whenever M's assembly has a DebuggableAttribute with the
7533 * IsJITOptimizerDisabled flag set.
7536 is_jit_optimizer_disabled (MonoMethod *m)
7538 MonoAssembly *ass = m->klass->image->assembly;
7539 MonoCustomAttrInfo* attrs;
7540 static MonoClass *klass;
7542 gboolean val = FALSE;
/* Per-assembly result is computed once and cached on the assembly. */
7545 if (ass->jit_optimizer_disabled_inited)
7546 return ass->jit_optimizer_disabled;
7549 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Attribute class missing: record "not disabled" and bail out.
 * Barrier orders the value write before the inited flag. */
7552 ass->jit_optimizer_disabled = FALSE;
7553 mono_memory_barrier ();
7554 ass->jit_optimizer_disabled_inited = TRUE;
7558 attrs = mono_custom_attrs_from_assembly (ass);
7560 for (i = 0; i < attrs->num_attrs; ++i) {
7561 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7563 MonoMethodSignature *sig;
7565 if (!attr->ctor || attr->ctor->klass != klass)
7567 /* Decode the attribute. See reflection.c */
7568 p = (const char*)attr->data;
7569 g_assert (read16 (p) == 0x0001);
7572 // FIXME: Support named parameters
7573 sig = mono_method_signature (attr->ctor);
7574 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7576 /* Two boolean arguments */
7580 mono_custom_attrs_free (attrs);
7583 ass->jit_optimizer_disabled = val;
7584 mono_memory_barrier ();
7585 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a call from METHOD to CMETHOD can be emitted as a tail
 *   call: starts from the backend's signature check, then vetoes cases where
 *   arguments could point into the current frame, pinvokes, save_lmf
 *   methods, wrappers (other than dynamic methods), and non-CEE_CALL
 *   opcodes.
 */
7591 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7593 gboolean supported_tail_call;
7596 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7598 for (i = 0; i < fsig->param_count; ++i) {
7599 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7600 /* These can point to the current method's stack */
7601 supported_tail_call = FALSE;
7603 if (fsig->hasthis && cmethod->klass->valuetype)
7604 /* this might point to the current method's stack */
7605 supported_tail_call = FALSE;
7606 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7607 supported_tail_call = FALSE;
7608 if (cfg->method->save_lmf)
7609 supported_tail_call = FALSE;
7610 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7611 supported_tail_call = FALSE;
7612 if (call_opcode != CEE_CALL)
7613 supported_tail_call = FALSE;
7615 /* Debugging support */
/* mono_debug_count () lets tail calls be bisected at runtime. */
7617 if (supported_tail_call) {
7618 if (!mono_debug_count ())
7619 supported_tail_call = FALSE;
7623 return supported_tail_call;
7629 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *   Emit the constructor invocation for a NEWOBJ. Depending on generic
 *   sharing this may need an rgctx/vtable argument, an intrinsic, an inlined
 *   body, an indirect (calli) call, or a plain direct call.
 */
7632 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7633 MonoInst **sp, guint8 *ip, int *inline_costs)
7635 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an rgctx or vtable passed explicitly. */
7637 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7638 mono_method_is_generic_sharable (cmethod, TRUE)) {
7639 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7640 mono_class_vtable (cfg->domain, cmethod->klass);
7641 CHECK_TYPELOAD (cmethod->klass);
7643 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7644 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7647 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7648 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7650 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7652 CHECK_TYPELOAD (cmethod->klass);
7653 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7658 /* Avoid virtual calls to ctors if possible */
7659 if (mono_class_is_marshalbyref (cmethod->klass))
7660 callvirt_this_arg = sp [0];
/* 1) intrinsic implementation of the ctor, if one exists */
7662 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7663 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7664 CHECK_CFG_EXCEPTION;
/* 2) inline the ctor body (never for Exception subclasses) */
7665 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7666 mono_method_check_inlining (cfg, cmethod) &&
7667 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7670 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7671 cfg->real_offset += 5;
7673 *inline_costs += costs - 5;
7675 INLINE_FAILURE ("inline failure");
7676 // FIXME-VT: Clean this up
7677 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7678 GSHAREDVT_FAILURE(*ip);
7679 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: call through the gsharedvt out trampoline */
7681 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7684 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7685 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) generic context but not sharable: indirect call through rgctx */
7686 } else if (context_used &&
7687 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7688 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7689 MonoInst *cmethod_addr;
7691 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7693 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7694 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7696 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) plain direct call */
7698 INLINE_FAILURE ("ctor call");
7699 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7700 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *   Emit the store of VAL as the current method's return value. Valuetype
 *   returns (stind class == CEE_STOBJ) go either into the dedicated ret var
 *   or through the hidden vret address; soft-float R4 returns are converted
 *   via an icall before the arch-specific setret.
 */
7707 emit_setret (MonoCompile *cfg, MonoInst *val)
7709 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7712 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7715 if (!cfg->vret_addr) {
7716 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it. */
7718 EMIT_NEW_RETLOADA (cfg, ret_addr);
7720 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7721 ins->klass = mono_class_from_mono_type (ret_type);
7724 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7725 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7726 MonoInst *iargs [1];
7730 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7731 mono_arch_emit_setret (cfg, cfg->method, conv);
7733 mono_arch_emit_setret (cfg, cfg->method, val);
7736 mono_arch_emit_setret (cfg, cfg->method, val);
7741 static MonoMethodSignature*
7742 sig_to_rgctx_sig (MonoMethodSignature *sig)
/*
 * sig_to_rgctx_sig:
 *
 *   Return a heap-allocated copy of SIG extended with one extra trailing
 * parameter of native-int (pointer-sized) type.  Presumably the extra slot
 * carries the rgctx argument, as the name suggests — verify against callers.
 *
 * NOTE(review): allocated with g_malloc and, as far as visible here, never
 * freed (see the FIXME below) — ownership of the returned signature should
 * be clarified.  Some lines of this function (local declarations, the
 * return statement, braces) are elided in this view.
 */
7744 // FIXME: memory allocation
7745 MonoMethodSignature *res;
/* MONO_SIZEOF_METHOD_SIGNATURE is the header size; the params [] array is stored inline after it. */
7748 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7749 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7750 res->param_count = sig->param_count + 1;
/* Copy the original parameter types ... */
7751 for (i = 0; i < sig->param_count; ++i)
7752 res->params [i] = sig->params [i];
/* ... then append the extra pointer-sized parameter at the end. */
7753 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7758 * mono_method_to_ir:
7760 * Translate the .net IL into linear IR.
7763 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7764 MonoInst *return_var, MonoInst **inline_args,
7765 guint inline_offset, gboolean is_virtual_call)
7768 MonoInst *ins, **sp, **stack_start;
7769 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7770 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7771 MonoMethod *cmethod, *method_definition;
7772 MonoInst **arg_array;
7773 MonoMethodHeader *header;
7775 guint32 token, ins_flag;
7777 MonoClass *constrained_class = NULL;
7778 unsigned char *ip, *end, *target, *err_pos;
7779 MonoMethodSignature *sig;
7780 MonoGenericContext *generic_context = NULL;
7781 MonoGenericContainer *generic_container = NULL;
7782 MonoType **param_types;
7783 int i, n, start_new_bblock, dreg;
7784 int num_calls = 0, inline_costs = 0;
7785 int breakpoint_id = 0;
7787 GSList *class_inits = NULL;
7788 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7790 gboolean init_locals, seq_points, skip_dead_blocks;
7791 gboolean sym_seq_points = FALSE;
7792 MonoDebugMethodInfo *minfo;
7793 MonoBitSet *seq_point_locs = NULL;
7794 MonoBitSet *seq_point_set_locs = NULL;
7796 cfg->disable_inline = is_jit_optimizer_disabled (method);
7798 /* serialization and xdomain stuff may need access to private fields and methods */
7799 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7800 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7801 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7802 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7803 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7804 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7806 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7807 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7808 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7809 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7810 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7812 image = method->klass->image;
7813 header = mono_method_get_header (method);
7815 MonoLoaderError *error;
7817 if ((error = mono_loader_get_last_error ())) {
7818 mono_cfg_set_exception (cfg, error->exception_type);
7820 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7821 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7823 goto exception_exit;
7825 generic_container = mono_method_get_generic_container (method);
7826 sig = mono_method_signature (method);
7827 num_args = sig->hasthis + sig->param_count;
7828 ip = (unsigned char*)header->code;
7829 cfg->cil_start = ip;
7830 end = ip + header->code_size;
7831 cfg->stat_cil_code_size += header->code_size;
7833 seq_points = cfg->gen_seq_points && cfg->method == method;
7835 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7836 /* We could hit a seq point before attaching to the JIT (#8338) */
7840 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7841 minfo = mono_debug_lookup_method (method);
7843 MonoSymSeqPoint *sps;
7844 int i, n_il_offsets;
7846 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7847 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7848 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7849 sym_seq_points = TRUE;
7850 for (i = 0; i < n_il_offsets; ++i) {
7851 if (sps [i].il_offset < header->code_size)
7852 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7855 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7856 /* Methods without line number info like auto-generated property accessors */
7857 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7858 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7859 sym_seq_points = TRUE;
7864 * Methods without init_locals set could cause asserts in various passes
7865 * (#497220). To work around this, we emit dummy initialization opcodes
7866 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7867 * on some platforms.
7869 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7870 init_locals = header->init_locals;
7874 method_definition = method;
7875 while (method_definition->is_inflated) {
7876 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7877 method_definition = imethod->declaring;
7880 /* SkipVerification is not allowed if core-clr is enabled */
7881 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7883 dont_verify_stloc = TRUE;
7886 if (sig->is_inflated)
7887 generic_context = mono_method_get_context (method);
7888 else if (generic_container)
7889 generic_context = &generic_container->context;
7890 cfg->generic_context = generic_context;
7893 g_assert (!sig->has_type_parameters);
7895 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7896 g_assert (method->is_inflated);
7897 g_assert (mono_method_get_context (method)->method_inst);
7899 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7900 g_assert (sig->generic_param_count);
7902 if (cfg->method == method) {
7903 cfg->real_offset = 0;
7905 cfg->real_offset = inline_offset;
7908 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7909 cfg->cil_offset_to_bb_len = header->code_size;
7911 cfg->current_method = method;
7913 if (cfg->verbose_level > 2)
7914 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7916 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7918 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7919 for (n = 0; n < sig->param_count; ++n)
7920 param_types [n + sig->hasthis] = sig->params [n];
7921 cfg->arg_types = param_types;
7923 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7924 if (cfg->method == method) {
7926 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7927 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7930 NEW_BBLOCK (cfg, start_bblock);
7931 cfg->bb_entry = start_bblock;
7932 start_bblock->cil_code = NULL;
7933 start_bblock->cil_length = 0;
7936 NEW_BBLOCK (cfg, end_bblock);
7937 cfg->bb_exit = end_bblock;
7938 end_bblock->cil_code = NULL;
7939 end_bblock->cil_length = 0;
7940 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7941 g_assert (cfg->num_bblocks == 2);
7943 arg_array = cfg->args;
7945 if (header->num_clauses) {
7946 cfg->spvars = g_hash_table_new (NULL, NULL);
7947 cfg->exvars = g_hash_table_new (NULL, NULL);
7949 /* handle exception clauses */
7950 for (i = 0; i < header->num_clauses; ++i) {
7951 MonoBasicBlock *try_bb;
7952 MonoExceptionClause *clause = &header->clauses [i];
7953 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7955 try_bb->real_offset = clause->try_offset;
7956 try_bb->try_start = TRUE;
7957 try_bb->region = ((i + 1) << 8) | clause->flags;
7958 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7959 tblock->real_offset = clause->handler_offset;
7960 tblock->flags |= BB_EXCEPTION_HANDLER;
7963 * Linking the try block with the EH block hinders inlining as we won't be able to
7964 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7966 if (COMPILE_LLVM (cfg))
7967 link_bblock (cfg, try_bb, tblock);
7969 if (*(ip + clause->handler_offset) == CEE_POP)
7970 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7972 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7973 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7974 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7975 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7976 MONO_ADD_INS (tblock, ins);
7978 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7979 /* finally clauses already have a seq point */
7980 /* seq points for filter clauses are emitted below */
7981 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7982 MONO_ADD_INS (tblock, ins);
7985 /* todo: is a fault block unsafe to optimize? */
7986 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7987 tblock->flags |= BB_EXCEPTION_UNSAFE;
7990 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7992 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7994 /* catch and filter blocks get the exception object on the stack */
7995 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7996 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7998 /* mostly like handle_stack_args (), but just sets the input args */
7999 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8000 tblock->in_scount = 1;
8001 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8002 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8006 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8007 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8008 if (!cfg->compile_llvm) {
8009 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8010 ins->dreg = tblock->in_stack [0]->dreg;
8011 MONO_ADD_INS (tblock, ins);
8014 MonoInst *dummy_use;
8017 * Add a dummy use for the exvar so its liveness info will be
8020 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8023 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8024 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8025 MONO_ADD_INS (tblock, ins);
8028 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8029 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8030 tblock->flags |= BB_EXCEPTION_HANDLER;
8031 tblock->real_offset = clause->data.filter_offset;
8032 tblock->in_scount = 1;
8033 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8034 /* The filter block shares the exvar with the handler block */
8035 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8036 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8037 MONO_ADD_INS (tblock, ins);
8041 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8042 clause->data.catch_class &&
8044 mono_class_check_context_used (clause->data.catch_class)) {
8046 * In shared generic code with catch
8047 * clauses containing type variables
8048 * the exception handling code has to
8049 * be able to get to the rgctx.
8050 * Therefore we have to make sure that
8051 * the vtable/mrgctx argument (for
8052 * static or generic methods) or the
8053 * "this" argument (for non-static
8054 * methods) are live.
8056 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8057 mini_method_get_context (method)->method_inst ||
8058 method->klass->valuetype) {
8059 mono_get_vtable_var (cfg);
8061 MonoInst *dummy_use;
8063 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8068 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8069 cfg->cbb = start_bblock;
8070 cfg->args = arg_array;
8071 mono_save_args (cfg, sig, inline_args);
8074 /* FIRST CODE BLOCK */
8075 NEW_BBLOCK (cfg, tblock);
8076 tblock->cil_code = ip;
8080 ADD_BBLOCK (cfg, tblock);
8082 if (cfg->method == method) {
8083 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8084 if (breakpoint_id) {
8085 MONO_INST_NEW (cfg, ins, OP_BREAK);
8086 MONO_ADD_INS (cfg->cbb, ins);
8090 /* we use a separate basic block for the initialization code */
8091 NEW_BBLOCK (cfg, init_localsbb);
8092 cfg->bb_init = init_localsbb;
8093 init_localsbb->real_offset = cfg->real_offset;
8094 start_bblock->next_bb = init_localsbb;
8095 init_localsbb->next_bb = cfg->cbb;
8096 link_bblock (cfg, start_bblock, init_localsbb);
8097 link_bblock (cfg, init_localsbb, cfg->cbb);
8099 cfg->cbb = init_localsbb;
8101 if (cfg->gsharedvt && cfg->method == method) {
8102 MonoGSharedVtMethodInfo *info;
8103 MonoInst *var, *locals_var;
8106 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8107 info->method = cfg->method;
8108 info->count_entries = 16;
8109 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8110 cfg->gsharedvt_info = info;
8112 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8113 /* prevent it from being register allocated */
8114 //var->flags |= MONO_INST_VOLATILE;
8115 cfg->gsharedvt_info_var = var;
8117 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8118 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8120 /* Allocate locals */
8121 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8122 /* prevent it from being register allocated */
8123 //locals_var->flags |= MONO_INST_VOLATILE;
8124 cfg->gsharedvt_locals_var = locals_var;
8126 dreg = alloc_ireg (cfg);
8127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8129 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8130 ins->dreg = locals_var->dreg;
8132 MONO_ADD_INS (cfg->cbb, ins);
8133 cfg->gsharedvt_locals_var_ins = ins;
8135 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8138 ins->flags |= MONO_INST_INIT;
8142 if (mono_security_core_clr_enabled ()) {
8143 /* check if this is native code, e.g. an icall or a p/invoke */
8144 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8145 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8147 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8148 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8150 /* if this ia a native call then it can only be JITted from platform code */
8151 if ((icall || pinvk) && method->klass && method->klass->image) {
8152 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8153 MonoException *ex = icall ? mono_get_exception_security () :
8154 mono_get_exception_method_access ();
8155 emit_throw_exception (cfg, ex);
8162 CHECK_CFG_EXCEPTION;
8164 if (header->code_size == 0)
8167 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8172 if (cfg->method == method)
8173 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8175 for (n = 0; n < header->num_locals; ++n) {
8176 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8181 /* We force the vtable variable here for all shared methods
8182 for the possibility that they might show up in a stack
8183 trace where their exact instantiation is needed. */
8184 if (cfg->gshared && method == cfg->method) {
8185 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8186 mini_method_get_context (method)->method_inst ||
8187 method->klass->valuetype) {
8188 mono_get_vtable_var (cfg);
8190 /* FIXME: Is there a better way to do this?
8191 We need the variable live for the duration
8192 of the whole method. */
8193 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8197 /* add a check for this != NULL to inlined methods */
8198 if (is_virtual_call) {
8201 NEW_ARGLOAD (cfg, arg_ins, 0);
8202 MONO_ADD_INS (cfg->cbb, arg_ins);
8203 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8206 skip_dead_blocks = !dont_verify;
8207 if (skip_dead_blocks) {
8208 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8213 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8214 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8217 start_new_bblock = 0;
8219 if (cfg->method == method)
8220 cfg->real_offset = ip - header->code;
8222 cfg->real_offset = inline_offset;
8227 if (start_new_bblock) {
8228 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8229 if (start_new_bblock == 2) {
8230 g_assert (ip == tblock->cil_code);
8232 GET_BBLOCK (cfg, tblock, ip);
8234 cfg->cbb->next_bb = tblock;
8236 start_new_bblock = 0;
8237 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8238 if (cfg->verbose_level > 3)
8239 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8240 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8244 g_slist_free (class_inits);
8247 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8248 link_bblock (cfg, cfg->cbb, tblock);
8249 if (sp != stack_start) {
8250 handle_stack_args (cfg, stack_start, sp - stack_start);
8252 CHECK_UNVERIFIABLE (cfg);
8254 cfg->cbb->next_bb = tblock;
8256 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8257 if (cfg->verbose_level > 3)
8258 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8259 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8262 g_slist_free (class_inits);
8267 if (skip_dead_blocks) {
8268 int ip_offset = ip - header->code;
8270 if (ip_offset == bb->end)
8274 int op_size = mono_opcode_size (ip, end);
8275 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8277 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8279 if (ip_offset + op_size == bb->end) {
8280 MONO_INST_NEW (cfg, ins, OP_NOP);
8281 MONO_ADD_INS (cfg->cbb, ins);
8282 start_new_bblock = 1;
8290 * Sequence points are points where the debugger can place a breakpoint.
8291 * Currently, we generate these automatically at points where the IL
8294 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8296 * Make methods interruptable at the beginning, and at the targets of
8297 * backward branches.
8298 * Also, do this at the start of every bblock in methods with clauses too,
8299 * to be able to handle instructions with inprecise control flow like
8301 * Backward branches are handled at the end of method-to-ir ().
8303 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8304 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8306 /* Avoid sequence points on empty IL like .volatile */
8307 // FIXME: Enable this
8308 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8309 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8310 if ((sp != stack_start) && !sym_seq_point)
8311 ins->flags |= MONO_INST_NONEMPTY_STACK;
8312 MONO_ADD_INS (cfg->cbb, ins);
8315 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8318 cfg->cbb->real_offset = cfg->real_offset;
8320 if ((cfg->method == method) && cfg->coverage_info) {
8321 guint32 cil_offset = ip - header->code;
8322 cfg->coverage_info->data [cil_offset].cil_code = ip;
8324 /* TODO: Use an increment here */
8325 #if defined(TARGET_X86)
8326 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8327 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8329 MONO_ADD_INS (cfg->cbb, ins);
8331 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8332 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8336 if (cfg->verbose_level > 3)
8337 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8341 if (seq_points && !sym_seq_points && sp != stack_start) {
8343 * The C# compiler uses these nops to notify the JIT that it should
8344 * insert seq points.
8346 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8347 MONO_ADD_INS (cfg->cbb, ins);
8349 if (cfg->keep_cil_nops)
8350 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8352 MONO_INST_NEW (cfg, ins, OP_NOP);
8354 MONO_ADD_INS (cfg->cbb, ins);
8357 if (should_insert_brekpoint (cfg->method)) {
8358 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8360 MONO_INST_NEW (cfg, ins, OP_NOP);
8363 MONO_ADD_INS (cfg->cbb, ins);
8369 CHECK_STACK_OVF (1);
8370 n = (*ip)-CEE_LDARG_0;
8372 EMIT_NEW_ARGLOAD (cfg, ins, n);
8380 CHECK_STACK_OVF (1);
8381 n = (*ip)-CEE_LDLOC_0;
8383 EMIT_NEW_LOCLOAD (cfg, ins, n);
8392 n = (*ip)-CEE_STLOC_0;
8395 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8397 emit_stloc_ir (cfg, sp, header, n);
8404 CHECK_STACK_OVF (1);
8407 EMIT_NEW_ARGLOAD (cfg, ins, n);
8413 CHECK_STACK_OVF (1);
8416 NEW_ARGLOADA (cfg, ins, n);
8417 MONO_ADD_INS (cfg->cbb, ins);
8427 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8429 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8434 CHECK_STACK_OVF (1);
8437 EMIT_NEW_LOCLOAD (cfg, ins, n);
8441 case CEE_LDLOCA_S: {
8442 unsigned char *tmp_ip;
8444 CHECK_STACK_OVF (1);
8445 CHECK_LOCAL (ip [1]);
8447 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8453 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8462 CHECK_LOCAL (ip [1]);
8463 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8465 emit_stloc_ir (cfg, sp, header, ip [1]);
8470 CHECK_STACK_OVF (1);
8471 EMIT_NEW_PCONST (cfg, ins, NULL);
8472 ins->type = STACK_OBJ;
8477 CHECK_STACK_OVF (1);
8478 EMIT_NEW_ICONST (cfg, ins, -1);
8491 CHECK_STACK_OVF (1);
8492 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8498 CHECK_STACK_OVF (1);
8500 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8506 CHECK_STACK_OVF (1);
8507 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8513 CHECK_STACK_OVF (1);
8514 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8515 ins->type = STACK_I8;
8516 ins->dreg = alloc_dreg (cfg, STACK_I8);
8518 ins->inst_l = (gint64)read64 (ip);
8519 MONO_ADD_INS (cfg->cbb, ins);
8525 gboolean use_aotconst = FALSE;
8527 #ifdef TARGET_POWERPC
8528 /* FIXME: Clean this up */
8529 if (cfg->compile_aot)
8530 use_aotconst = TRUE;
8533 /* FIXME: we should really allocate this only late in the compilation process */
8534 f = mono_domain_alloc (cfg->domain, sizeof (float));
8536 CHECK_STACK_OVF (1);
8542 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8544 dreg = alloc_freg (cfg);
8545 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8546 ins->type = cfg->r4_stack_type;
8548 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8549 ins->type = cfg->r4_stack_type;
8550 ins->dreg = alloc_dreg (cfg, STACK_R8);
8552 MONO_ADD_INS (cfg->cbb, ins);
8562 gboolean use_aotconst = FALSE;
8564 #ifdef TARGET_POWERPC
8565 /* FIXME: Clean this up */
8566 if (cfg->compile_aot)
8567 use_aotconst = TRUE;
8570 /* FIXME: we should really allocate this only late in the compilation process */
8571 d = mono_domain_alloc (cfg->domain, sizeof (double));
8573 CHECK_STACK_OVF (1);
8579 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8581 dreg = alloc_freg (cfg);
8582 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8583 ins->type = STACK_R8;
8585 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8586 ins->type = STACK_R8;
8587 ins->dreg = alloc_dreg (cfg, STACK_R8);
8589 MONO_ADD_INS (cfg->cbb, ins);
8598 MonoInst *temp, *store;
8600 CHECK_STACK_OVF (1);
8604 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8605 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8607 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8610 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8623 if (sp [0]->type == STACK_R8)
8624 /* we need to pop the value from the x86 FP stack */
8625 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8630 MonoMethodSignature *fsig;
8633 INLINE_FAILURE ("jmp");
8634 GSHAREDVT_FAILURE (*ip);
8637 if (stack_start != sp)
8639 token = read32 (ip + 1);
8640 /* FIXME: check the signature matches */
8641 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8643 if (!cmethod || mono_loader_get_last_error ())
8646 if (cfg->gshared && mono_method_check_context_used (cmethod))
8647 GENERIC_SHARING_FAILURE (CEE_JMP);
8649 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8651 fsig = mono_method_signature (cmethod);
8652 n = fsig->param_count + fsig->hasthis;
8653 if (cfg->llvm_only) {
8656 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8657 for (i = 0; i < n; ++i)
8658 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8659 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8661 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8662 * have to emit a normal return since llvm expects it.
8665 emit_setret (cfg, ins);
8666 MONO_INST_NEW (cfg, ins, OP_BR);
8667 ins->inst_target_bb = end_bblock;
8668 MONO_ADD_INS (cfg->cbb, ins);
8669 link_bblock (cfg, cfg->cbb, end_bblock);
8672 } else if (cfg->backend->have_op_tail_call) {
8673 /* Handle tail calls similarly to calls */
8676 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8677 call->method = cmethod;
8678 call->tail_call = TRUE;
8679 call->signature = mono_method_signature (cmethod);
8680 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8681 call->inst.inst_p0 = cmethod;
8682 for (i = 0; i < n; ++i)
8683 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8685 mono_arch_emit_call (cfg, call);
8686 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8687 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8689 for (i = 0; i < num_args; ++i)
8690 /* Prevent arguments from being optimized away */
8691 arg_array [i]->flags |= MONO_INST_VOLATILE;
8693 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8694 ins = (MonoInst*)call;
8695 ins->inst_p0 = cmethod;
8696 MONO_ADD_INS (cfg->cbb, ins);
8700 start_new_bblock = 1;
8705 MonoMethodSignature *fsig;
8708 token = read32 (ip + 1);
8712 //GSHAREDVT_FAILURE (*ip);
8717 fsig = mini_get_signature (method, token, generic_context);
8719 if (method->dynamic && fsig->pinvoke) {
8723 * This is a call through a function pointer using a pinvoke
8724 * signature. Have to create a wrapper and call that instead.
8725 * FIXME: This is very slow, need to create a wrapper at JIT time
8726 * instead based on the signature.
8728 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8729 EMIT_NEW_PCONST (cfg, args [1], fsig);
8731 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8734 n = fsig->param_count + fsig->hasthis;
8738 //g_assert (!virtual || fsig->hasthis);
8742 inline_costs += 10 * num_calls++;
8745 * Making generic calls out of gsharedvt methods.
8746 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8747 * patching gshared method addresses into a gsharedvt method.
8749 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8751 * We pass the address to the gsharedvt trampoline in the rgctx reg
8753 MonoInst *callee = addr;
8755 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8757 GSHAREDVT_FAILURE (*ip);
8759 addr = emit_get_rgctx_sig (cfg, context_used,
8760 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8761 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8765 /* Prevent inlining of methods with indirect calls */
8766 INLINE_FAILURE ("indirect call");
8768 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8773 * Instead of emitting an indirect call, emit a direct call
8774 * with the contents of the aotconst as the patch info.
8776 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8777 info_type = addr->inst_c1;
8778 info_data = addr->inst_p0;
8780 info_type = addr->inst_right->inst_c1;
8781 info_data = addr->inst_right->inst_left;
8784 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8785 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8790 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8794 /* End of call, INS should contain the result of the call, if any */
8796 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8798 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8801 CHECK_CFG_EXCEPTION;
8805 constrained_class = NULL;
8809 case CEE_CALLVIRT: {
8810 MonoInst *addr = NULL;
8811 MonoMethodSignature *fsig = NULL;
8813 int virtual = *ip == CEE_CALLVIRT;
8814 gboolean pass_imt_from_rgctx = FALSE;
8815 MonoInst *imt_arg = NULL;
8816 MonoInst *keep_this_alive = NULL;
8817 gboolean pass_vtable = FALSE;
8818 gboolean pass_mrgctx = FALSE;
8819 MonoInst *vtable_arg = NULL;
8820 gboolean check_this = FALSE;
8821 gboolean supported_tail_call = FALSE;
8822 gboolean tail_call = FALSE;
8823 gboolean need_seq_point = FALSE;
8824 guint32 call_opcode = *ip;
8825 gboolean emit_widen = TRUE;
8826 gboolean push_res = TRUE;
8827 gboolean skip_ret = FALSE;
8828 gboolean delegate_invoke = FALSE;
8829 gboolean direct_icall = FALSE;
8830 gboolean constrained_partial_call = FALSE;
8831 MonoMethod *cil_method;
8834 token = read32 (ip + 1);
8838 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8839 cil_method = cmethod;
8841 if (constrained_class) {
8842 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8843 if (!mini_is_gsharedvt_klass (constrained_class)) {
8844 g_assert (!cmethod->klass->valuetype);
8845 if (!mini_type_is_reference (&constrained_class->byval_arg))
8846 constrained_partial_call = TRUE;
8850 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8851 if (cfg->verbose_level > 2)
8852 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8853 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8854 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8856 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8860 if (cfg->verbose_level > 2)
8861 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8863 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8865 * This is needed since get_method_constrained can't find
8866 * the method in klass representing a type var.
8867 * The type var is guaranteed to be a reference type in this
8870 if (!mini_is_gsharedvt_klass (constrained_class))
8871 g_assert (!cmethod->klass->valuetype);
8873 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8879 if (!cmethod || mono_loader_get_last_error ())
8881 if (!dont_verify && !cfg->skip_visibility) {
8882 MonoMethod *target_method = cil_method;
8883 if (method->is_inflated) {
8884 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8886 if (!mono_method_can_access_method (method_definition, target_method) &&
8887 !mono_method_can_access_method (method, cil_method))
8888 METHOD_ACCESS_FAILURE (method, cil_method);
8891 if (mono_security_core_clr_enabled ())
8892 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8894 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8895 /* MS.NET seems to silently convert this to a callvirt */
8900 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8901 * converts to a callvirt.
8903 * tests/bug-515884.il is an example of this behavior
8905 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8906 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8907 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8911 if (!cmethod->klass->inited)
8912 if (!mono_class_init (cmethod->klass))
8913 TYPE_LOAD_ERROR (cmethod->klass);
8915 fsig = mono_method_signature (cmethod);
8918 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8919 mini_class_is_system_array (cmethod->klass)) {
8920 array_rank = cmethod->klass->rank;
8921 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8922 direct_icall = TRUE;
8923 } else if (fsig->pinvoke) {
8924 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8925 fsig = mono_method_signature (wrapper);
8926 } else if (constrained_class) {
8928 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8932 /* See code below */
8933 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8934 MonoBasicBlock *tbb;
8936 GET_BBLOCK (cfg, tbb, ip + 5);
8937 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8939 * We want to extend the try block to cover the call, but we can't do it if the
8940 * call is made directly since its followed by an exception check.
8942 direct_icall = FALSE;
8946 mono_save_token_info (cfg, image, token, cil_method);
8948 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8949 need_seq_point = TRUE;
8951 /* Don't support calls made using type arguments for now */
8953 if (cfg->gsharedvt) {
8954 if (mini_is_gsharedvt_signature (fsig))
8955 GSHAREDVT_FAILURE (*ip);
8959 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8960 g_assert_not_reached ();
8962 n = fsig->param_count + fsig->hasthis;
8964 if (!cfg->gshared && cmethod->klass->generic_container)
8968 g_assert (!mono_method_check_context_used (cmethod));
8972 //g_assert (!virtual || fsig->hasthis);
8976 if (constrained_class) {
8977 if (mini_is_gsharedvt_klass (constrained_class)) {
8978 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8979 /* The 'Own method' case below */
8980 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8981 /* 'The type parameter is instantiated as a reference type' case below. */
8983 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8984 CHECK_CFG_EXCEPTION;
8991 * We have the `constrained.' prefix opcode.
8993 if (constrained_partial_call) {
8994 gboolean need_box = TRUE;
8997 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8998 * called method is not known at compile time either. The called method could end up being
8999 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9000 * to box the receiver.
9001 * A simple solution would be to box always and make a normal virtual call, but that would
9002 * be bad performance wise.
9004 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9006 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9011 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9012 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9013 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9014 ins->klass = constrained_class;
9015 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9016 CHECK_CFG_EXCEPTION;
9017 } else if (need_box) {
9019 MonoBasicBlock *is_ref_bb, *end_bb;
9020 MonoInst *nonbox_call;
9023 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9025 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9026 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9028 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9030 NEW_BBLOCK (cfg, is_ref_bb);
9031 NEW_BBLOCK (cfg, end_bb);
9033 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9034 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9035 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9038 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9040 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9043 MONO_START_BB (cfg, is_ref_bb);
9044 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9045 ins->klass = constrained_class;
9046 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9047 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9049 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9051 MONO_START_BB (cfg, end_bb);
9054 nonbox_call->dreg = ins->dreg;
9057 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9058 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9059 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9062 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9064 * The type parameter is instantiated as a valuetype,
9065 * but that type doesn't override the method we're
9066 * calling, so we need to box `this'.
9068 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9069 ins->klass = constrained_class;
9070 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9071 CHECK_CFG_EXCEPTION;
9072 } else if (!constrained_class->valuetype) {
9073 int dreg = alloc_ireg_ref (cfg);
9076 * The type parameter is instantiated as a reference
9077 * type. We have a managed pointer on the stack, so
9078 * we need to dereference it here.
9080 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9081 ins->type = STACK_OBJ;
9084 if (cmethod->klass->valuetype) {
9087 /* Interface method */
9090 mono_class_setup_vtable (constrained_class);
9091 CHECK_TYPELOAD (constrained_class);
9092 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9094 TYPE_LOAD_ERROR (constrained_class);
9095 slot = mono_method_get_vtable_slot (cmethod);
9097 TYPE_LOAD_ERROR (cmethod->klass);
9098 cmethod = constrained_class->vtable [ioffset + slot];
9100 if (cmethod->klass == mono_defaults.enum_class) {
9101 /* Enum implements some interfaces, so treat this as the first case */
9102 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9103 ins->klass = constrained_class;
9104 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9105 CHECK_CFG_EXCEPTION;
9110 constrained_class = NULL;
9113 if (check_call_signature (cfg, fsig, sp))
9116 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9117 delegate_invoke = TRUE;
9119 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9120 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9121 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9129 * If the callee is a shared method, then its static cctor
9130 * might not get called after the call was patched.
9132 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9133 emit_class_init (cfg, cmethod->klass);
9134 CHECK_TYPELOAD (cmethod->klass);
9137 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9140 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9142 context_used = mini_method_check_context_used (cfg, cmethod);
9144 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9145 /* Generic method interface
9146 calls are resolved via a
9147 helper function and don't
9149 if (!cmethod_context || !cmethod_context->method_inst)
9150 pass_imt_from_rgctx = TRUE;
9154 * If a shared method calls another
9155 * shared method then the caller must
9156 * have a generic sharing context
9157 * because the magic trampoline
9158 * requires it. FIXME: We shouldn't
9159 * have to force the vtable/mrgctx
9160 * variable here. Instead there
9161 * should be a flag in the cfg to
9162 * request a generic sharing context.
9165 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9166 mono_get_vtable_var (cfg);
9171 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9173 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9175 CHECK_TYPELOAD (cmethod->klass);
9176 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9181 g_assert (!vtable_arg);
9183 if (!cfg->compile_aot) {
9185 * emit_get_rgctx_method () calls mono_class_vtable () so check
9186 * for type load errors before.
9188 mono_class_setup_vtable (cmethod->klass);
9189 CHECK_TYPELOAD (cmethod->klass);
9192 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9194 /* !marshalbyref is needed to properly handle generic methods + remoting */
9195 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9196 MONO_METHOD_IS_FINAL (cmethod)) &&
9197 !mono_class_is_marshalbyref (cmethod->klass)) {
9204 if (pass_imt_from_rgctx) {
9205 g_assert (!pass_vtable);
9207 imt_arg = emit_get_rgctx_method (cfg, context_used,
9208 cmethod, MONO_RGCTX_INFO_METHOD);
9212 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9214 /* Calling virtual generic methods */
9215 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9216 !(MONO_METHOD_IS_FINAL (cmethod) &&
9217 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9218 fsig->generic_param_count &&
9219 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9221 MonoInst *this_temp, *this_arg_temp, *store;
9222 MonoInst *iargs [4];
9224 g_assert (fsig->is_inflated);
9226 /* Prevent inlining of methods that contain indirect calls */
9227 INLINE_FAILURE ("virtual generic call");
9229 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9230 GSHAREDVT_FAILURE (*ip);
9232 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9233 g_assert (!imt_arg);
9235 g_assert (cmethod->is_inflated);
9236 imt_arg = emit_get_rgctx_method (cfg, context_used,
9237 cmethod, MONO_RGCTX_INFO_METHOD);
9238 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9240 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9241 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9242 MONO_ADD_INS (cfg->cbb, store);
9244 /* FIXME: This should be a managed pointer */
9245 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9247 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9248 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9249 cmethod, MONO_RGCTX_INFO_METHOD);
9250 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9251 addr = mono_emit_jit_icall (cfg,
9252 mono_helper_compile_generic_method, iargs);
9254 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9256 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9263 * Implement a workaround for the inherent races involved in locking:
9269 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9270 * try block, the Exit () won't be executed, see:
9271 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9272 * To work around this, we extend such try blocks to include the last x bytes
9273 * of the Monitor.Enter () call.
9275 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9276 MonoBasicBlock *tbb;
9278 GET_BBLOCK (cfg, tbb, ip + 5);
9280 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9281 * from Monitor.Enter like ArgumentNullException.
9283 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9284 /* Mark this bblock as needing to be extended */
9285 tbb->extend_try_block = TRUE;
9289 /* Conversion to a JIT intrinsic */
9290 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9291 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9292 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9299 if ((cfg->opt & MONO_OPT_INLINE) &&
9300 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9301 mono_method_check_inlining (cfg, cmethod)) {
9303 gboolean always = FALSE;
9305 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9306 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9307 /* Prevent inlining of methods that call wrappers */
9308 INLINE_FAILURE ("wrapper call");
9309 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9313 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9315 cfg->real_offset += 5;
9317 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9318 /* *sp is already set by inline_method */
9323 inline_costs += costs;
9329 /* Tail recursion elimination */
9330 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9331 gboolean has_vtargs = FALSE;
9334 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9335 INLINE_FAILURE ("tail call");
9337 /* keep it simple */
9338 for (i = fsig->param_count - 1; i >= 0; i--) {
9339 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9344 for (i = 0; i < n; ++i)
9345 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9346 MONO_INST_NEW (cfg, ins, OP_BR);
9347 MONO_ADD_INS (cfg->cbb, ins);
9348 tblock = start_bblock->out_bb [0];
9349 link_bblock (cfg, cfg->cbb, tblock);
9350 ins->inst_target_bb = tblock;
9351 start_new_bblock = 1;
9353 /* skip the CEE_RET, too */
9354 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9361 inline_costs += 10 * num_calls++;
9364 * Making generic calls out of gsharedvt methods.
9365 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9366 * patching gshared method addresses into a gsharedvt method.
9368 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9369 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9370 MonoRgctxInfoType info_type;
9373 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9374 //GSHAREDVT_FAILURE (*ip);
9375 // disable for possible remoting calls
9376 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9377 GSHAREDVT_FAILURE (*ip);
9378 if (fsig->generic_param_count) {
9379 /* virtual generic call */
9380 g_assert (!imt_arg);
9381 /* Same as the virtual generic case above */
9382 imt_arg = emit_get_rgctx_method (cfg, context_used,
9383 cmethod, MONO_RGCTX_INFO_METHOD);
9384 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9386 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9387 /* This can happen when we call a fully instantiated iface method */
9388 imt_arg = emit_get_rgctx_method (cfg, context_used,
9389 cmethod, MONO_RGCTX_INFO_METHOD);
9394 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9395 keep_this_alive = sp [0];
9397 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9398 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9400 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9401 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9403 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9407 /* Generic sharing */
9410 * Use this if the callee is gsharedvt sharable too, since
9411 * at runtime we might find an instantiation so the call cannot
9412 * be patched (the 'no_patch' code path in mini-trampolines.c).
9414 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9415 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9416 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9417 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9418 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9419 INLINE_FAILURE ("gshared");
9421 g_assert (cfg->gshared && cmethod);
9425 * We are compiling a call to a
9426 * generic method from shared code,
9427 * which means that we have to look up
9428 * the method in the rgctx and do an
9432 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9434 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9435 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9439 /* Direct calls to icalls */
9441 MonoMethod *wrapper;
9444 /* Inline the wrapper */
9445 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9447 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9448 g_assert (costs > 0);
9449 cfg->real_offset += 5;
9451 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9452 /* *sp is already set by inline_method */
9457 inline_costs += costs;
9466 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9467 MonoInst *val = sp [fsig->param_count];
9469 if (val->type == STACK_OBJ) {
9470 MonoInst *iargs [2];
9475 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9478 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9479 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9480 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9481 emit_write_barrier (cfg, addr, val);
9482 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9483 GSHAREDVT_FAILURE (*ip);
9484 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9485 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9487 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9488 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9489 if (!cmethod->klass->element_class->valuetype && !readonly)
9490 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9491 CHECK_TYPELOAD (cmethod->klass);
9494 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9497 g_assert_not_reached ();
9504 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9508 /* Tail prefix / tail call optimization */
9510 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9511 /* FIXME: runtime generic context pointer for jumps? */
9512 /* FIXME: handle this for generic sharing eventually */
9513 if ((ins_flag & MONO_INST_TAILCALL) &&
9514 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9515 supported_tail_call = TRUE;
9517 if (supported_tail_call) {
9520 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9521 INLINE_FAILURE ("tail call");
9523 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9525 if (cfg->backend->have_op_tail_call) {
9526 /* Handle tail calls similarly to normal calls */
9529 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9531 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9532 call->tail_call = TRUE;
9533 call->method = cmethod;
9534 call->signature = mono_method_signature (cmethod);
9537 * We implement tail calls by storing the actual arguments into the
9538 * argument variables, then emitting a CEE_JMP.
9540 for (i = 0; i < n; ++i) {
9541 /* Prevent argument from being register allocated */
9542 arg_array [i]->flags |= MONO_INST_VOLATILE;
9543 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9545 ins = (MonoInst*)call;
9546 ins->inst_p0 = cmethod;
9547 ins->inst_p1 = arg_array [0];
9548 MONO_ADD_INS (cfg->cbb, ins);
9549 link_bblock (cfg, cfg->cbb, end_bblock);
9550 start_new_bblock = 1;
9552 // FIXME: Eliminate unreachable epilogs
9555 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9556 * only reachable from this call.
9558 GET_BBLOCK (cfg, tblock, ip + 5);
9559 if (tblock == cfg->cbb || tblock->in_count == 0)
9568 * Synchronized wrappers.
9569 * Its hard to determine where to replace a method with its synchronized
9570 * wrapper without causing an infinite recursion. The current solution is
9571 * to add the synchronized wrapper in the trampolines, and to
9572 * change the called method to a dummy wrapper, and resolve that wrapper
9573 * to the real method in mono_jit_compile_method ().
9575 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9576 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9577 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9578 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9582 * Interface calls in llvm-only mode are complicated becase the callee might need an rgctx arg,
9583 * (i.e. its a vtype method), and there is no way to for the caller to know this at compile time.
9584 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9585 * based on whenever there is an rgctx or not.
9587 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9588 MonoInst *args_buf [16], *icall_args [16];
9590 MonoBasicBlock *rgctx_bb, *end_bb;
9591 MonoInst *call1, *call2, *call_target;
9592 MonoMethodSignature *rgctx_sig;
9593 int rgctx_reg, tmp_reg;
9595 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9597 NEW_BBLOCK (cfg, rgctx_bb);
9598 NEW_BBLOCK (cfg, end_bb);
9600 // FIXME: Optimize this
9602 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9604 icall_args [0] = sp [0];
9605 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9607 icall_args [2] = imt_arg;
9609 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9610 icall_args [2] = ins;
9613 rgctx_reg = alloc_preg (cfg);
9614 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9615 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9616 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9618 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9620 // FIXME: Only do this if needed (generic calls)
9622 // Check whenever to pass an rgctx
9623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9624 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9625 /* Non rgctx case */
9626 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9627 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9629 MONO_START_BB (cfg, rgctx_bb);
9630 /* Make a call with an rgctx */
9631 if (fsig->param_count + 2 < 16)
9634 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9636 for (i = 0; i < fsig->param_count; ++i)
9637 args [i + 1] = sp [i + 1];
9638 tmp_reg = alloc_preg (cfg);
9639 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9640 rgctx_sig = sig_to_rgctx_sig (fsig);
9641 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9642 call2->dreg = call1->dreg;
9643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9645 MONO_START_BB (cfg, end_bb);
9651 INLINE_FAILURE ("call");
9652 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9653 imt_arg, vtable_arg);
9655 if (tail_call && !cfg->llvm_only) {
9656 link_bblock (cfg, cfg->cbb, end_bblock);
9657 start_new_bblock = 1;
9659 // FIXME: Eliminate unreachable epilogs
9662 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9663 * only reachable from this call.
9665 GET_BBLOCK (cfg, tblock, ip + 5);
9666 if (tblock == cfg->cbb || tblock->in_count == 0)
9673 /* End of call, INS should contain the result of the call, if any */
9675 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9678 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9683 if (keep_this_alive) {
9684 MonoInst *dummy_use;
9686 /* See mono_emit_method_call_full () */
9687 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9690 CHECK_CFG_EXCEPTION;
9694 g_assert (*ip == CEE_RET);
9698 constrained_class = NULL;
9700 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9704 if (cfg->method != method) {
9705 /* return from inlined method */
9707 * If in_count == 0, that means the ret is unreachable due to
9708 * being preceeded by a throw. In that case, inline_method () will
9709 * handle setting the return value
9710 * (test case: test_0_inline_throw ()).
9712 if (return_var && cfg->cbb->in_count) {
9713 MonoType *ret_type = mono_method_signature (method)->ret;
9719 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9722 //g_assert (returnvar != -1);
9723 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9724 cfg->ret_var_set = TRUE;
9727 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9729 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9733 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9735 if (seq_points && !sym_seq_points) {
9737 * Place a seq point here too even through the IL stack is not
9738 * empty, so a step over on
9741 * will work correctly.
9743 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9744 MONO_ADD_INS (cfg->cbb, ins);
9747 g_assert (!return_var);
9751 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9754 emit_setret (cfg, *sp);
9757 if (sp != stack_start)
9759 MONO_INST_NEW (cfg, ins, OP_BR);
9761 ins->inst_target_bb = end_bblock;
9762 MONO_ADD_INS (cfg->cbb, ins);
9763 link_bblock (cfg, cfg->cbb, end_bblock);
9764 start_new_bblock = 1;
9768 MONO_INST_NEW (cfg, ins, OP_BR);
9770 target = ip + 1 + (signed char)(*ip);
9772 GET_BBLOCK (cfg, tblock, target);
9773 link_bblock (cfg, cfg->cbb, tblock);
9774 ins->inst_target_bb = tblock;
9775 if (sp != stack_start) {
9776 handle_stack_args (cfg, stack_start, sp - stack_start);
9778 CHECK_UNVERIFIABLE (cfg);
9780 MONO_ADD_INS (cfg->cbb, ins);
9781 start_new_bblock = 1;
9782 inline_costs += BRANCH_COST;
9796 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9798 target = ip + 1 + *(signed char*)ip;
9804 inline_costs += BRANCH_COST;
9808 MONO_INST_NEW (cfg, ins, OP_BR);
9811 target = ip + 4 + (gint32)read32(ip);
9813 GET_BBLOCK (cfg, tblock, target);
9814 link_bblock (cfg, cfg->cbb, tblock);
9815 ins->inst_target_bb = tblock;
9816 if (sp != stack_start) {
9817 handle_stack_args (cfg, stack_start, sp - stack_start);
9819 CHECK_UNVERIFIABLE (cfg);
9822 MONO_ADD_INS (cfg->cbb, ins);
9824 start_new_bblock = 1;
9825 inline_costs += BRANCH_COST;
9832 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9833 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9834 guint32 opsize = is_short ? 1 : 4;
9836 CHECK_OPSIZE (opsize);
9838 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9841 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9846 GET_BBLOCK (cfg, tblock, target);
9847 link_bblock (cfg, cfg->cbb, tblock);
9848 GET_BBLOCK (cfg, tblock, ip);
9849 link_bblock (cfg, cfg->cbb, tblock);
9851 if (sp != stack_start) {
9852 handle_stack_args (cfg, stack_start, sp - stack_start);
9853 CHECK_UNVERIFIABLE (cfg);
9856 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9857 cmp->sreg1 = sp [0]->dreg;
9858 type_from_op (cfg, cmp, sp [0], NULL);
9861 #if SIZEOF_REGISTER == 4
9862 if (cmp->opcode == OP_LCOMPARE_IMM) {
9863 /* Convert it to OP_LCOMPARE */
9864 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9865 ins->type = STACK_I8;
9866 ins->dreg = alloc_dreg (cfg, STACK_I8);
9868 MONO_ADD_INS (cfg->cbb, ins);
9869 cmp->opcode = OP_LCOMPARE;
9870 cmp->sreg2 = ins->dreg;
9873 MONO_ADD_INS (cfg->cbb, cmp);
9875 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9876 type_from_op (cfg, ins, sp [0], NULL);
9877 MONO_ADD_INS (cfg->cbb, ins);
9878 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9879 GET_BBLOCK (cfg, tblock, target);
9880 ins->inst_true_bb = tblock;
9881 GET_BBLOCK (cfg, tblock, ip);
9882 ins->inst_false_bb = tblock;
9883 start_new_bblock = 2;
9886 inline_costs += BRANCH_COST;
9901 MONO_INST_NEW (cfg, ins, *ip);
9903 target = ip + 4 + (gint32)read32(ip);
9909 inline_costs += BRANCH_COST;
9913 MonoBasicBlock **targets;
9914 MonoBasicBlock *default_bblock;
9915 MonoJumpInfoBBTable *table;
9916 int offset_reg = alloc_preg (cfg);
9917 int target_reg = alloc_preg (cfg);
9918 int table_reg = alloc_preg (cfg);
9919 int sum_reg = alloc_preg (cfg);
9920 gboolean use_op_switch;
9924 n = read32 (ip + 1);
9927 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9931 CHECK_OPSIZE (n * sizeof (guint32));
9932 target = ip + n * sizeof (guint32);
9934 GET_BBLOCK (cfg, default_bblock, target);
9935 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9937 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9938 for (i = 0; i < n; ++i) {
9939 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9940 targets [i] = tblock;
9941 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9945 if (sp != stack_start) {
9947 * Link the current bb with the targets as well, so handle_stack_args
9948 * will set their in_stack correctly.
9950 link_bblock (cfg, cfg->cbb, default_bblock);
9951 for (i = 0; i < n; ++i)
9952 link_bblock (cfg, cfg->cbb, targets [i]);
9954 handle_stack_args (cfg, stack_start, sp - stack_start);
9956 CHECK_UNVERIFIABLE (cfg);
9958 /* Undo the links */
9959 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9960 for (i = 0; i < n; ++i)
9961 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9964 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9965 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9967 for (i = 0; i < n; ++i)
9968 link_bblock (cfg, cfg->cbb, targets [i]);
9970 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9971 table->table = targets;
9972 table->table_size = n;
9974 use_op_switch = FALSE;
9976 /* ARM implements SWITCH statements differently */
9977 /* FIXME: Make it use the generic implementation */
9978 if (!cfg->compile_aot)
9979 use_op_switch = TRUE;
9982 if (COMPILE_LLVM (cfg))
9983 use_op_switch = TRUE;
9985 cfg->cbb->has_jump_table = 1;
9987 if (use_op_switch) {
9988 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9989 ins->sreg1 = src1->dreg;
9990 ins->inst_p0 = table;
9991 ins->inst_many_bb = targets;
9992 ins->klass = GUINT_TO_POINTER (n);
9993 MONO_ADD_INS (cfg->cbb, ins);
9995 if (sizeof (gpointer) == 8)
9996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10000 #if SIZEOF_REGISTER == 8
10001 /* The upper word might not be zero, and we add it to a 64 bit address later */
10002 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10005 if (cfg->compile_aot) {
10006 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10008 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10009 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10010 ins->inst_p0 = table;
10011 ins->dreg = table_reg;
10012 MONO_ADD_INS (cfg->cbb, ins);
10015 /* FIXME: Use load_memindex */
10016 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10017 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10018 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10020 start_new_bblock = 1;
10021 inline_costs += (BRANCH_COST * 2);
10034 case CEE_LDIND_REF:
10041 dreg = alloc_freg (cfg);
10044 dreg = alloc_lreg (cfg);
10046 case CEE_LDIND_REF:
10047 dreg = alloc_ireg_ref (cfg);
10050 dreg = alloc_preg (cfg);
10053 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10054 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10055 if (*ip == CEE_LDIND_R4)
10056 ins->type = cfg->r4_stack_type;
10057 ins->flags |= ins_flag;
10058 MONO_ADD_INS (cfg->cbb, ins);
10060 if (ins_flag & MONO_INST_VOLATILE) {
10061 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10062 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10067 case CEE_STIND_REF:
10078 if (ins_flag & MONO_INST_VOLATILE) {
10079 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10080 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10083 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10084 ins->flags |= ins_flag;
10087 MONO_ADD_INS (cfg->cbb, ins);
10089 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10090 emit_write_barrier (cfg, sp [0], sp [1]);
10099 MONO_INST_NEW (cfg, ins, (*ip));
10101 ins->sreg1 = sp [0]->dreg;
10102 ins->sreg2 = sp [1]->dreg;
10103 type_from_op (cfg, ins, sp [0], sp [1]);
10105 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10107 /* Use the immediate opcodes if possible */
10108 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10109 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10110 if (imm_opcode != -1) {
10111 ins->opcode = imm_opcode;
10112 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10115 NULLIFY_INS (sp [1]);
10119 MONO_ADD_INS ((cfg)->cbb, (ins));
10121 *sp++ = mono_decompose_opcode (cfg, ins);
10138 MONO_INST_NEW (cfg, ins, (*ip));
10140 ins->sreg1 = sp [0]->dreg;
10141 ins->sreg2 = sp [1]->dreg;
10142 type_from_op (cfg, ins, sp [0], sp [1]);
10144 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10145 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10147 /* FIXME: Pass opcode to is_inst_imm */
10149 /* Use the immediate opcodes if possible */
10150 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10153 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10154 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10155 /* Keep emulated opcodes which are optimized away later */
10156 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10157 imm_opcode = mono_op_to_op_imm (ins->opcode);
10160 if (imm_opcode != -1) {
10161 ins->opcode = imm_opcode;
10162 if (sp [1]->opcode == OP_I8CONST) {
10163 #if SIZEOF_REGISTER == 8
10164 ins->inst_imm = sp [1]->inst_l;
10166 ins->inst_ls_word = sp [1]->inst_ls_word;
10167 ins->inst_ms_word = sp [1]->inst_ms_word;
10171 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10174 /* Might be followed by an instruction added by add_widen_op */
10175 if (sp [1]->next == NULL)
10176 NULLIFY_INS (sp [1]);
10179 MONO_ADD_INS ((cfg)->cbb, (ins));
10181 *sp++ = mono_decompose_opcode (cfg, ins);
10194 case CEE_CONV_OVF_I8:
10195 case CEE_CONV_OVF_U8:
10196 case CEE_CONV_R_UN:
10199 /* Special case this earlier so we have long constants in the IR */
10200 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10201 int data = sp [-1]->inst_c0;
10202 sp [-1]->opcode = OP_I8CONST;
10203 sp [-1]->type = STACK_I8;
10204 #if SIZEOF_REGISTER == 8
10205 if ((*ip) == CEE_CONV_U8)
10206 sp [-1]->inst_c0 = (guint32)data;
10208 sp [-1]->inst_c0 = data;
10210 sp [-1]->inst_ls_word = data;
10211 if ((*ip) == CEE_CONV_U8)
10212 sp [-1]->inst_ms_word = 0;
10214 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10216 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10223 case CEE_CONV_OVF_I4:
10224 case CEE_CONV_OVF_I1:
10225 case CEE_CONV_OVF_I2:
10226 case CEE_CONV_OVF_I:
10227 case CEE_CONV_OVF_U:
10230 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10231 ADD_UNOP (CEE_CONV_OVF_I8);
10238 case CEE_CONV_OVF_U1:
10239 case CEE_CONV_OVF_U2:
10240 case CEE_CONV_OVF_U4:
10243 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10244 ADD_UNOP (CEE_CONV_OVF_U8);
10251 case CEE_CONV_OVF_I1_UN:
10252 case CEE_CONV_OVF_I2_UN:
10253 case CEE_CONV_OVF_I4_UN:
10254 case CEE_CONV_OVF_I8_UN:
10255 case CEE_CONV_OVF_U1_UN:
10256 case CEE_CONV_OVF_U2_UN:
10257 case CEE_CONV_OVF_U4_UN:
10258 case CEE_CONV_OVF_U8_UN:
10259 case CEE_CONV_OVF_I_UN:
10260 case CEE_CONV_OVF_U_UN:
10267 CHECK_CFG_EXCEPTION;
10271 case CEE_ADD_OVF_UN:
10273 case CEE_MUL_OVF_UN:
10275 case CEE_SUB_OVF_UN:
10281 GSHAREDVT_FAILURE (*ip);
10284 token = read32 (ip + 1);
10285 klass = mini_get_class (method, token, generic_context);
10286 CHECK_TYPELOAD (klass);
10288 if (generic_class_is_reference_type (cfg, klass)) {
10289 MonoInst *store, *load;
10290 int dreg = alloc_ireg_ref (cfg);
10292 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10293 load->flags |= ins_flag;
10294 MONO_ADD_INS (cfg->cbb, load);
10296 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10297 store->flags |= ins_flag;
10298 MONO_ADD_INS (cfg->cbb, store);
10300 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10301 emit_write_barrier (cfg, sp [0], sp [1]);
10303 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10309 int loc_index = -1;
10315 token = read32 (ip + 1);
10316 klass = mini_get_class (method, token, generic_context);
10317 CHECK_TYPELOAD (klass);
10319 /* Optimize the common ldobj+stloc combination */
10322 loc_index = ip [6];
10329 loc_index = ip [5] - CEE_STLOC_0;
10336 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10337 CHECK_LOCAL (loc_index);
10339 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10340 ins->dreg = cfg->locals [loc_index]->dreg;
10341 ins->flags |= ins_flag;
10344 if (ins_flag & MONO_INST_VOLATILE) {
10345 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10346 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10352 /* Optimize the ldobj+stobj combination */
10353 /* The reference case ends up being a load+store anyway */
10354 /* Skip this if the operation is volatile. */
10355 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10360 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10368 ins->flags |= ins_flag;
10371 if (ins_flag & MONO_INST_VOLATILE) {
10372 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10373 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10382 CHECK_STACK_OVF (1);
10384 n = read32 (ip + 1);
10386 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10387 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10388 ins->type = STACK_OBJ;
10391 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10392 MonoInst *iargs [1];
10393 char *str = mono_method_get_wrapper_data (method, n);
10395 if (cfg->compile_aot)
10396 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10398 EMIT_NEW_PCONST (cfg, iargs [0], str);
10399 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10401 if (cfg->opt & MONO_OPT_SHARED) {
10402 MonoInst *iargs [3];
10404 if (cfg->compile_aot) {
10405 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10407 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10408 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10409 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10410 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10411 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10413 if (cfg->cbb->out_of_line) {
10414 MonoInst *iargs [2];
10416 if (image == mono_defaults.corlib) {
10418 * Avoid relocations in AOT and save some space by using a
10419 * version of helper_ldstr specialized to mscorlib.
10421 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10422 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10424 /* Avoid creating the string object */
10425 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10426 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10427 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10431 if (cfg->compile_aot) {
10432 NEW_LDSTRCONST (cfg, ins, image, n);
10434 MONO_ADD_INS (cfg->cbb, ins);
10437 NEW_PCONST (cfg, ins, NULL);
10438 ins->type = STACK_OBJ;
10439 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10441 OUT_OF_MEMORY_FAILURE;
10444 MONO_ADD_INS (cfg->cbb, ins);
10453 MonoInst *iargs [2];
10454 MonoMethodSignature *fsig;
10457 MonoInst *vtable_arg = NULL;
10460 token = read32 (ip + 1);
10461 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10462 if (!cmethod || mono_loader_get_last_error ())
10464 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10467 mono_save_token_info (cfg, image, token, cmethod);
10469 if (!mono_class_init (cmethod->klass))
10470 TYPE_LOAD_ERROR (cmethod->klass);
10472 context_used = mini_method_check_context_used (cfg, cmethod);
10474 if (mono_security_core_clr_enabled ())
10475 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10477 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10478 emit_class_init (cfg, cmethod->klass);
10479 CHECK_TYPELOAD (cmethod->klass);
10483 if (cfg->gsharedvt) {
10484 if (mini_is_gsharedvt_variable_signature (sig))
10485 GSHAREDVT_FAILURE (*ip);
10489 n = fsig->param_count;
10493 * Generate smaller code for the common newobj <exception> instruction in
10494 * argument checking code.
10496 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10497 is_exception_class (cmethod->klass) && n <= 2 &&
10498 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10499 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10500 MonoInst *iargs [3];
10504 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10507 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10510 iargs [1] = sp [0];
10511 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10514 iargs [1] = sp [0];
10515 iargs [2] = sp [1];
10516 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10519 g_assert_not_reached ();
10527 /* move the args to allow room for 'this' in the first position */
10533 /* check_call_signature () requires sp[0] to be set */
10534 this_ins.type = STACK_OBJ;
10535 sp [0] = &this_ins;
10536 if (check_call_signature (cfg, fsig, sp))
10541 if (mini_class_is_system_array (cmethod->klass)) {
10542 *sp = emit_get_rgctx_method (cfg, context_used,
10543 cmethod, MONO_RGCTX_INFO_METHOD);
10545 /* Avoid varargs in the common case */
10546 if (fsig->param_count == 1)
10547 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10548 else if (fsig->param_count == 2)
10549 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10550 else if (fsig->param_count == 3)
10551 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10552 else if (fsig->param_count == 4)
10553 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10555 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10556 } else if (cmethod->string_ctor) {
10557 g_assert (!context_used);
10558 g_assert (!vtable_arg);
10559 /* we simply pass a null pointer */
10560 EMIT_NEW_PCONST (cfg, *sp, NULL);
10561 /* now call the string ctor */
10562 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10564 if (cmethod->klass->valuetype) {
10565 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10566 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10567 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10572 * The code generated by mini_emit_virtual_call () expects
10573 * iargs [0] to be a boxed instance, but luckily the vcall
10574 * will be transformed into a normal call there.
10576 } else if (context_used) {
10577 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10580 MonoVTable *vtable = NULL;
10582 if (!cfg->compile_aot)
10583 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10584 CHECK_TYPELOAD (cmethod->klass);
10587 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10588 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10589 * As a workaround, we call class cctors before allocating objects.
10591 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10592 emit_class_init (cfg, cmethod->klass);
10593 if (cfg->verbose_level > 2)
10594 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10595 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10598 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10601 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10604 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10606 /* Now call the actual ctor */
10607 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10608 CHECK_CFG_EXCEPTION;
10611 if (alloc == NULL) {
10613 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10614 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10622 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10623 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10626 case CEE_CASTCLASS:
10630 token = read32 (ip + 1);
10631 klass = mini_get_class (method, token, generic_context);
10632 CHECK_TYPELOAD (klass);
10633 if (sp [0]->type != STACK_OBJ)
10636 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10637 CHECK_CFG_EXCEPTION;
10646 token = read32 (ip + 1);
10647 klass = mini_get_class (method, token, generic_context);
10648 CHECK_TYPELOAD (klass);
10649 if (sp [0]->type != STACK_OBJ)
10652 context_used = mini_class_check_context_used (cfg, klass);
10654 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10655 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10656 MonoInst *args [3];
10663 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10666 if (cfg->compile_aot) {
10667 idx = get_castclass_cache_idx (cfg);
10668 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10670 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10673 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10676 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10677 MonoMethod *mono_isinst;
10678 MonoInst *iargs [1];
10681 mono_isinst = mono_marshal_get_isinst (klass);
10682 iargs [0] = sp [0];
10684 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10685 iargs, ip, cfg->real_offset, TRUE);
10686 CHECK_CFG_EXCEPTION;
10687 g_assert (costs > 0);
10690 cfg->real_offset += 5;
10694 inline_costs += costs;
10697 ins = handle_isinst (cfg, klass, *sp, context_used);
10698 CHECK_CFG_EXCEPTION;
10704 case CEE_UNBOX_ANY: {
10705 MonoInst *res, *addr;
10710 token = read32 (ip + 1);
10711 klass = mini_get_class (method, token, generic_context);
10712 CHECK_TYPELOAD (klass);
10714 mono_save_token_info (cfg, image, token, klass);
10716 context_used = mini_class_check_context_used (cfg, klass);
10718 if (mini_is_gsharedvt_klass (klass)) {
10719 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10721 } else if (generic_class_is_reference_type (cfg, klass)) {
10722 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10723 CHECK_CFG_EXCEPTION;
10724 } else if (mono_class_is_nullable (klass)) {
10725 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10727 addr = handle_unbox (cfg, klass, sp, context_used);
10729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10740 MonoClass *enum_class;
10741 MonoMethod *has_flag;
10747 token = read32 (ip + 1);
10748 klass = mini_get_class (method, token, generic_context);
10749 CHECK_TYPELOAD (klass);
10751 mono_save_token_info (cfg, image, token, klass);
10753 context_used = mini_class_check_context_used (cfg, klass);
10755 if (generic_class_is_reference_type (cfg, klass)) {
10761 if (klass == mono_defaults.void_class)
10763 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10765 /* frequent check in generic code: box (struct), brtrue */
10770 * <push int/long ptr>
10773 * constrained. MyFlags
10774 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10776 * If we find this sequence and the operand types on box and constrained
10777 * are equal, we can emit a specialized instruction sequence instead of
10778 * the very slow HasFlag () call.
10780 if ((cfg->opt & MONO_OPT_INTRINS) &&
10781 /* Cheap checks first. */
10782 ip + 5 + 6 + 5 < end &&
10783 ip [5] == CEE_PREFIX1 &&
10784 ip [6] == CEE_CONSTRAINED_ &&
10785 ip [11] == CEE_CALLVIRT &&
10786 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10787 mono_class_is_enum (klass) &&
10788 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10789 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10790 has_flag->klass == mono_defaults.enum_class &&
10791 !strcmp (has_flag->name, "HasFlag") &&
10792 has_flag->signature->hasthis &&
10793 has_flag->signature->param_count == 1) {
10794 CHECK_TYPELOAD (enum_class);
10796 if (enum_class == klass) {
10797 MonoInst *enum_this, *enum_flag;
10802 enum_this = sp [0];
10803 enum_flag = sp [1];
10805 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10810 // FIXME: LLVM can't handle the inconsistent bb linking
10811 if (!mono_class_is_nullable (klass) &&
10812 !mini_is_gsharedvt_klass (klass) &&
10813 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10814 (ip [5] == CEE_BRTRUE ||
10815 ip [5] == CEE_BRTRUE_S ||
10816 ip [5] == CEE_BRFALSE ||
10817 ip [5] == CEE_BRFALSE_S)) {
10818 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10820 MonoBasicBlock *true_bb, *false_bb;
10824 if (cfg->verbose_level > 3) {
10825 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10826 printf ("<box+brtrue opt>\n");
10831 case CEE_BRFALSE_S:
10834 target = ip + 1 + (signed char)(*ip);
10841 target = ip + 4 + (gint)(read32 (ip));
10845 g_assert_not_reached ();
10849 * We need to link both bblocks, since it is needed for handling stack
10850 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10851 * Branching to only one of them would lead to inconsistencies, so
10852 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10854 GET_BBLOCK (cfg, true_bb, target);
10855 GET_BBLOCK (cfg, false_bb, ip);
10857 mono_link_bblock (cfg, cfg->cbb, true_bb);
10858 mono_link_bblock (cfg, cfg->cbb, false_bb);
10860 if (sp != stack_start) {
10861 handle_stack_args (cfg, stack_start, sp - stack_start);
10863 CHECK_UNVERIFIABLE (cfg);
10866 if (COMPILE_LLVM (cfg)) {
10867 dreg = alloc_ireg (cfg);
10868 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10869 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10871 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10873 /* The JIT can't eliminate the iconst+compare */
10874 MONO_INST_NEW (cfg, ins, OP_BR);
10875 ins->inst_target_bb = is_true ? true_bb : false_bb;
10876 MONO_ADD_INS (cfg->cbb, ins);
10879 start_new_bblock = 1;
10883 *sp++ = handle_box (cfg, val, klass, context_used);
10885 CHECK_CFG_EXCEPTION;
10894 token = read32 (ip + 1);
10895 klass = mini_get_class (method, token, generic_context);
10896 CHECK_TYPELOAD (klass);
10898 mono_save_token_info (cfg, image, token, klass);
10900 context_used = mini_class_check_context_used (cfg, klass);
10902 if (mono_class_is_nullable (klass)) {
10905 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10906 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10910 ins = handle_unbox (cfg, klass, sp, context_used);
10923 MonoClassField *field;
10924 #ifndef DISABLE_REMOTING
10928 gboolean is_instance;
10930 gpointer addr = NULL;
10931 gboolean is_special_static;
10933 MonoInst *store_val = NULL;
10934 MonoInst *thread_ins;
10937 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10939 if (op == CEE_STFLD) {
10942 store_val = sp [1];
10947 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10949 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10952 if (op == CEE_STSFLD) {
10955 store_val = sp [0];
10960 token = read32 (ip + 1);
10961 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10962 field = mono_method_get_wrapper_data (method, token);
10963 klass = field->parent;
10966 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10969 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10970 FIELD_ACCESS_FAILURE (method, field);
10971 mono_class_init (klass);
10973 /* if the class is Critical then transparent code cannot access its fields */
10974 if (!is_instance && mono_security_core_clr_enabled ())
10975 ensure_method_is_allowed_to_access_field (cfg, method, field);
10977 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10978 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10979 if (mono_security_core_clr_enabled ())
10980 ensure_method_is_allowed_to_access_field (cfg, method, field);
10983 ftype = mono_field_get_type (field);
10986 * LDFLD etc. is usable on static fields as well, so convert those cases to
10989 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11001 g_assert_not_reached ();
11003 is_instance = FALSE;
11006 context_used = mini_class_check_context_used (cfg, klass);
11008 /* INSTANCE CASE */
11010 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11011 if (op == CEE_STFLD) {
11012 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11014 #ifndef DISABLE_REMOTING
11015 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11016 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11017 MonoInst *iargs [5];
11019 GSHAREDVT_FAILURE (op);
11021 iargs [0] = sp [0];
11022 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11023 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11024 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11026 iargs [4] = sp [1];
11028 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11029 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11030 iargs, ip, cfg->real_offset, TRUE);
11031 CHECK_CFG_EXCEPTION;
11032 g_assert (costs > 0);
11034 cfg->real_offset += 5;
11036 inline_costs += costs;
11038 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11045 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11047 if (mini_is_gsharedvt_klass (klass)) {
11048 MonoInst *offset_ins;
11050 context_used = mini_class_check_context_used (cfg, klass);
11052 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11053 dreg = alloc_ireg_mp (cfg);
11054 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11055 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11056 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11060 if (sp [0]->opcode != OP_LDADDR)
11061 store->flags |= MONO_INST_FAULT;
11063 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11064 /* insert call to write barrier */
11068 dreg = alloc_ireg_mp (cfg);
11069 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11070 emit_write_barrier (cfg, ptr, sp [1]);
11073 store->flags |= ins_flag;
11080 #ifndef DISABLE_REMOTING
11081 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11082 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11083 MonoInst *iargs [4];
11085 GSHAREDVT_FAILURE (op);
11087 iargs [0] = sp [0];
11088 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11089 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11090 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11091 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11092 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11093 iargs, ip, cfg->real_offset, TRUE);
11094 CHECK_CFG_EXCEPTION;
11095 g_assert (costs > 0);
11097 cfg->real_offset += 5;
11101 inline_costs += costs;
11103 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11109 if (sp [0]->type == STACK_VTYPE) {
11112 /* Have to compute the address of the variable */
11114 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11116 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11118 g_assert (var->klass == klass);
11120 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11124 if (op == CEE_LDFLDA) {
11125 if (sp [0]->type == STACK_OBJ) {
11126 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11127 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11130 dreg = alloc_ireg_mp (cfg);
11132 if (mini_is_gsharedvt_klass (klass)) {
11133 MonoInst *offset_ins;
11135 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11136 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11138 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11140 ins->klass = mono_class_from_mono_type (field->type);
11141 ins->type = STACK_MP;
11146 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11148 if (mini_is_gsharedvt_klass (klass)) {
11149 MonoInst *offset_ins;
11151 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11152 dreg = alloc_ireg_mp (cfg);
11153 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11154 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11156 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11158 load->flags |= ins_flag;
11159 if (sp [0]->opcode != OP_LDADDR)
11160 load->flags |= MONO_INST_FAULT;
11172 context_used = mini_class_check_context_used (cfg, klass);
11174 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11177 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11178 * to be called here.
11180 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11181 mono_class_vtable (cfg->domain, klass);
11182 CHECK_TYPELOAD (klass);
11184 mono_domain_lock (cfg->domain);
11185 if (cfg->domain->special_static_fields)
11186 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11187 mono_domain_unlock (cfg->domain);
11189 is_special_static = mono_class_field_is_special_static (field);
11191 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11192 thread_ins = mono_get_thread_intrinsic (cfg);
11196 /* Generate IR to compute the field address */
11197 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11199 * Fast access to TLS data
11200 * Inline version of get_thread_static_data () in
11204 int idx, static_data_reg, array_reg, dreg;
11206 GSHAREDVT_FAILURE (op);
11208 MONO_ADD_INS (cfg->cbb, thread_ins);
11209 static_data_reg = alloc_ireg (cfg);
11210 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11212 if (cfg->compile_aot) {
11213 int offset_reg, offset2_reg, idx_reg;
11215 /* For TLS variables, this will return the TLS offset */
11216 EMIT_NEW_SFLDACONST (cfg, ins, field);
11217 offset_reg = ins->dreg;
11218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11219 idx_reg = alloc_ireg (cfg);
11220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11222 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11223 array_reg = alloc_ireg (cfg);
11224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11225 offset2_reg = alloc_ireg (cfg);
11226 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11228 dreg = alloc_ireg (cfg);
11229 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11231 offset = (gsize)addr & 0x7fffffff;
11232 idx = offset & 0x3f;
11234 array_reg = alloc_ireg (cfg);
11235 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11236 dreg = alloc_ireg (cfg);
11237 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11239 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11240 (cfg->compile_aot && is_special_static) ||
11241 (context_used && is_special_static)) {
11242 MonoInst *iargs [2];
11244 g_assert (field->parent);
11245 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11246 if (context_used) {
11247 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11248 field, MONO_RGCTX_INFO_CLASS_FIELD);
11250 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11252 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11253 } else if (context_used) {
11254 MonoInst *static_data;
11257 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11258 method->klass->name_space, method->klass->name, method->name,
11259 depth, field->offset);
11262 if (mono_class_needs_cctor_run (klass, method))
11263 emit_class_init (cfg, klass);
11266 * The pointer we're computing here is
11268 * super_info.static_data + field->offset
11270 static_data = emit_get_rgctx_klass (cfg, context_used,
11271 klass, MONO_RGCTX_INFO_STATIC_DATA);
11273 if (mini_is_gsharedvt_klass (klass)) {
11274 MonoInst *offset_ins;
11276 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11277 dreg = alloc_ireg_mp (cfg);
11278 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11279 } else if (field->offset == 0) {
11282 int addr_reg = mono_alloc_preg (cfg);
11283 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11285 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11286 MonoInst *iargs [2];
11288 g_assert (field->parent);
11289 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11290 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11291 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11293 MonoVTable *vtable = NULL;
11295 if (!cfg->compile_aot)
11296 vtable = mono_class_vtable (cfg->domain, klass);
11297 CHECK_TYPELOAD (klass);
11300 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11301 if (!(g_slist_find (class_inits, klass))) {
11302 emit_class_init (cfg, klass);
11303 if (cfg->verbose_level > 2)
11304 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11305 class_inits = g_slist_prepend (class_inits, klass);
11308 if (cfg->run_cctors) {
11310 /* This makes it so that inlining cannot trigger */
11311 /* .cctors: too many apps depend on them */
11312 /* running with a specific order... */
11314 if (! vtable->initialized)
11315 INLINE_FAILURE ("class init");
11316 ex = mono_runtime_class_init_full (vtable, FALSE);
11318 set_exception_object (cfg, ex);
11319 goto exception_exit;
11323 if (cfg->compile_aot)
11324 EMIT_NEW_SFLDACONST (cfg, ins, field);
11327 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11329 EMIT_NEW_PCONST (cfg, ins, addr);
11332 MonoInst *iargs [1];
11333 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11334 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11338 /* Generate IR to do the actual load/store operation */
11340 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11341 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11342 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11345 if (op == CEE_LDSFLDA) {
11346 ins->klass = mono_class_from_mono_type (ftype);
11347 ins->type = STACK_PTR;
11349 } else if (op == CEE_STSFLD) {
11352 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11353 store->flags |= ins_flag;
11355 gboolean is_const = FALSE;
11356 MonoVTable *vtable = NULL;
11357 gpointer addr = NULL;
11359 if (!context_used) {
11360 vtable = mono_class_vtable (cfg->domain, klass);
11361 CHECK_TYPELOAD (klass);
11363 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11364 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11365 int ro_type = ftype->type;
11367 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11368 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11369 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11372 GSHAREDVT_FAILURE (op);
11374 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11377 case MONO_TYPE_BOOLEAN:
11379 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11383 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11386 case MONO_TYPE_CHAR:
11388 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11392 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11397 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11401 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11406 case MONO_TYPE_PTR:
11407 case MONO_TYPE_FNPTR:
11408 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11409 type_to_eval_stack_type ((cfg), field->type, *sp);
11412 case MONO_TYPE_STRING:
11413 case MONO_TYPE_OBJECT:
11414 case MONO_TYPE_CLASS:
11415 case MONO_TYPE_SZARRAY:
11416 case MONO_TYPE_ARRAY:
11417 if (!mono_gc_is_moving ()) {
11418 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11419 type_to_eval_stack_type ((cfg), field->type, *sp);
11427 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11432 case MONO_TYPE_VALUETYPE:
11442 CHECK_STACK_OVF (1);
11444 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11445 load->flags |= ins_flag;
11451 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11452 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11453 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11464 token = read32 (ip + 1);
11465 klass = mini_get_class (method, token, generic_context);
11466 CHECK_TYPELOAD (klass);
11467 if (ins_flag & MONO_INST_VOLATILE) {
11468 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11469 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11471 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11472 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11473 ins->flags |= ins_flag;
11474 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11475 generic_class_is_reference_type (cfg, klass)) {
11476 /* insert call to write barrier */
11477 emit_write_barrier (cfg, sp [0], sp [1]);
11489 const char *data_ptr;
11491 guint32 field_token;
11497 token = read32 (ip + 1);
11499 klass = mini_get_class (method, token, generic_context);
11500 CHECK_TYPELOAD (klass);
11502 context_used = mini_class_check_context_used (cfg, klass);
11504 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11505 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11506 ins->sreg1 = sp [0]->dreg;
11507 ins->type = STACK_I4;
11508 ins->dreg = alloc_ireg (cfg);
11509 MONO_ADD_INS (cfg->cbb, ins);
11510 *sp = mono_decompose_opcode (cfg, ins);
11513 if (context_used) {
11514 MonoInst *args [3];
11515 MonoClass *array_class = mono_array_class_get (klass, 1);
11516 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11518 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11521 args [0] = emit_get_rgctx_klass (cfg, context_used,
11522 array_class, MONO_RGCTX_INFO_VTABLE);
11527 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11529 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11531 if (cfg->opt & MONO_OPT_SHARED) {
11532 /* Decompose now to avoid problems with references to the domainvar */
11533 MonoInst *iargs [3];
11535 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11536 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11537 iargs [2] = sp [0];
11539 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11541 /* Decompose later since it is needed by abcrem */
11542 MonoClass *array_type = mono_array_class_get (klass, 1);
11543 mono_class_vtable (cfg->domain, array_type);
11544 CHECK_TYPELOAD (array_type);
11546 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11547 ins->dreg = alloc_ireg_ref (cfg);
11548 ins->sreg1 = sp [0]->dreg;
11549 ins->inst_newa_class = klass;
11550 ins->type = STACK_OBJ;
11551 ins->klass = array_type;
11552 MONO_ADD_INS (cfg->cbb, ins);
11553 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11554 cfg->cbb->has_array_access = TRUE;
11556 /* Needed so mono_emit_load_get_addr () gets called */
11557 mono_get_got_var (cfg);
11567 * we inline/optimize the initialization sequence if possible.
11568 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11569 * for small sizes open code the memcpy
11570 * ensure the rva field is big enough
11572 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11573 MonoMethod *memcpy_method = get_memcpy_method ();
11574 MonoInst *iargs [3];
11575 int add_reg = alloc_ireg_mp (cfg);
11577 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11578 if (cfg->compile_aot) {
11579 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11581 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11583 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11584 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11593 if (sp [0]->type != STACK_OBJ)
11596 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11597 ins->dreg = alloc_preg (cfg);
11598 ins->sreg1 = sp [0]->dreg;
11599 ins->type = STACK_I4;
11600 /* This flag will be inherited by the decomposition */
11601 ins->flags |= MONO_INST_FAULT;
11602 MONO_ADD_INS (cfg->cbb, ins);
11603 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11604 cfg->cbb->has_array_access = TRUE;
11612 if (sp [0]->type != STACK_OBJ)
11615 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11617 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11618 CHECK_TYPELOAD (klass);
11619 /* we need to make sure that this array is exactly the type it needs
11620 * to be for correctness. the wrappers are lax with their usage
11621 * so we need to ignore them here
11623 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11624 MonoClass *array_class = mono_array_class_get (klass, 1);
11625 mini_emit_check_array_type (cfg, sp [0], array_class);
11626 CHECK_TYPELOAD (array_class);
11630 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11635 case CEE_LDELEM_I1:
11636 case CEE_LDELEM_U1:
11637 case CEE_LDELEM_I2:
11638 case CEE_LDELEM_U2:
11639 case CEE_LDELEM_I4:
11640 case CEE_LDELEM_U4:
11641 case CEE_LDELEM_I8:
11643 case CEE_LDELEM_R4:
11644 case CEE_LDELEM_R8:
11645 case CEE_LDELEM_REF: {
11651 if (*ip == CEE_LDELEM) {
11653 token = read32 (ip + 1);
11654 klass = mini_get_class (method, token, generic_context);
11655 CHECK_TYPELOAD (klass);
11656 mono_class_init (klass);
11659 klass = array_access_to_klass (*ip);
11661 if (sp [0]->type != STACK_OBJ)
11664 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11666 if (mini_is_gsharedvt_variable_klass (klass)) {
11667 // FIXME-VT: OP_ICONST optimization
11668 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11669 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11670 ins->opcode = OP_LOADV_MEMBASE;
11671 } else if (sp [1]->opcode == OP_ICONST) {
11672 int array_reg = sp [0]->dreg;
11673 int index_reg = sp [1]->dreg;
11674 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11676 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11677 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11679 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11683 if (*ip == CEE_LDELEM)
11690 case CEE_STELEM_I1:
11691 case CEE_STELEM_I2:
11692 case CEE_STELEM_I4:
11693 case CEE_STELEM_I8:
11694 case CEE_STELEM_R4:
11695 case CEE_STELEM_R8:
11696 case CEE_STELEM_REF:
11701 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11703 if (*ip == CEE_STELEM) {
11705 token = read32 (ip + 1);
11706 klass = mini_get_class (method, token, generic_context);
11707 CHECK_TYPELOAD (klass);
11708 mono_class_init (klass);
11711 klass = array_access_to_klass (*ip);
11713 if (sp [0]->type != STACK_OBJ)
11716 emit_array_store (cfg, klass, sp, TRUE);
11718 if (*ip == CEE_STELEM)
11725 case CEE_CKFINITE: {
11729 if (cfg->llvm_only) {
11730 MonoInst *iargs [1];
11732 iargs [0] = sp [0];
11733 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11735 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11736 ins->sreg1 = sp [0]->dreg;
11737 ins->dreg = alloc_freg (cfg);
11738 ins->type = STACK_R8;
11739 MONO_ADD_INS (cfg->cbb, ins);
11741 *sp++ = mono_decompose_opcode (cfg, ins);
11747 case CEE_REFANYVAL: {
11748 MonoInst *src_var, *src;
11750 int klass_reg = alloc_preg (cfg);
11751 int dreg = alloc_preg (cfg);
11753 GSHAREDVT_FAILURE (*ip);
11756 MONO_INST_NEW (cfg, ins, *ip);
11759 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11760 CHECK_TYPELOAD (klass);
11762 context_used = mini_class_check_context_used (cfg, klass);
11765 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11767 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11768 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11771 if (context_used) {
11772 MonoInst *klass_ins;
11774 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11775 klass, MONO_RGCTX_INFO_KLASS);
11778 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11781 mini_emit_class_check (cfg, klass_reg, klass);
11783 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11784 ins->type = STACK_MP;
11785 ins->klass = klass;
11790 case CEE_MKREFANY: {
11791 MonoInst *loc, *addr;
11793 GSHAREDVT_FAILURE (*ip);
11796 MONO_INST_NEW (cfg, ins, *ip);
11799 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11800 CHECK_TYPELOAD (klass);
11802 context_used = mini_class_check_context_used (cfg, klass);
11804 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11805 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11807 if (context_used) {
11808 MonoInst *const_ins;
11809 int type_reg = alloc_preg (cfg);
11811 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11815 } else if (cfg->compile_aot) {
11816 int const_reg = alloc_preg (cfg);
11817 int type_reg = alloc_preg (cfg);
11819 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11821 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11824 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11825 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11829 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11830 ins->type = STACK_VTYPE;
11831 ins->klass = mono_defaults.typed_reference_class;
11836 case CEE_LDTOKEN: {
11838 MonoClass *handle_class;
11840 CHECK_STACK_OVF (1);
11843 n = read32 (ip + 1);
11845 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11846 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11847 handle = mono_method_get_wrapper_data (method, n);
11848 handle_class = mono_method_get_wrapper_data (method, n + 1);
11849 if (handle_class == mono_defaults.typehandle_class)
11850 handle = &((MonoClass*)handle)->byval_arg;
11853 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11858 mono_class_init (handle_class);
11859 if (cfg->gshared) {
11860 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11861 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11862 /* This case handles ldtoken
11863 of an open type, like for
11866 } else if (handle_class == mono_defaults.typehandle_class) {
11867 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11868 } else if (handle_class == mono_defaults.fieldhandle_class)
11869 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11870 else if (handle_class == mono_defaults.methodhandle_class)
11871 context_used = mini_method_check_context_used (cfg, handle);
11873 g_assert_not_reached ();
11876 if ((cfg->opt & MONO_OPT_SHARED) &&
11877 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11878 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11879 MonoInst *addr, *vtvar, *iargs [3];
11880 int method_context_used;
11882 method_context_used = mini_method_check_context_used (cfg, method);
11884 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11886 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11887 EMIT_NEW_ICONST (cfg, iargs [1], n);
11888 if (method_context_used) {
11889 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11890 method, MONO_RGCTX_INFO_METHOD);
11891 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11893 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11894 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11896 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11900 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11902 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11903 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11904 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11905 (cmethod->klass == mono_defaults.systemtype_class) &&
11906 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11907 MonoClass *tclass = mono_class_from_mono_type (handle);
11909 mono_class_init (tclass);
11910 if (context_used) {
11911 ins = emit_get_rgctx_klass (cfg, context_used,
11912 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11913 } else if (cfg->compile_aot) {
11914 if (method->wrapper_type) {
11915 mono_error_init (&error); //got to do it since there are multiple conditionals below
11916 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11917 /* Special case for static synchronized wrappers */
11918 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11920 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11921 /* FIXME: n is not a normal token */
11923 EMIT_NEW_PCONST (cfg, ins, NULL);
11926 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11929 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11931 ins->type = STACK_OBJ;
11932 ins->klass = cmethod->klass;
11935 MonoInst *addr, *vtvar;
11937 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11939 if (context_used) {
11940 if (handle_class == mono_defaults.typehandle_class) {
11941 ins = emit_get_rgctx_klass (cfg, context_used,
11942 mono_class_from_mono_type (handle),
11943 MONO_RGCTX_INFO_TYPE);
11944 } else if (handle_class == mono_defaults.methodhandle_class) {
11945 ins = emit_get_rgctx_method (cfg, context_used,
11946 handle, MONO_RGCTX_INFO_METHOD);
11947 } else if (handle_class == mono_defaults.fieldhandle_class) {
11948 ins = emit_get_rgctx_field (cfg, context_used,
11949 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11951 g_assert_not_reached ();
11953 } else if (cfg->compile_aot) {
11954 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11956 EMIT_NEW_PCONST (cfg, ins, handle);
11958 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11960 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11970 MONO_INST_NEW (cfg, ins, OP_THROW);
11972 ins->sreg1 = sp [0]->dreg;
11974 cfg->cbb->out_of_line = TRUE;
11975 MONO_ADD_INS (cfg->cbb, ins);
11976 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11977 MONO_ADD_INS (cfg->cbb, ins);
11980 link_bblock (cfg, cfg->cbb, end_bblock);
11981 start_new_bblock = 1;
11982 /* This can complicate code generation for llvm since the return value might not be defined */
11983 if (COMPILE_LLVM (cfg))
11984 INLINE_FAILURE ("throw");
11986 case CEE_ENDFINALLY:
11987 /* mono_save_seq_point_info () depends on this */
11988 if (sp != stack_start)
11989 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11990 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11991 MONO_ADD_INS (cfg->cbb, ins);
11993 start_new_bblock = 1;
11996 * Control will leave the method so empty the stack, otherwise
11997 * the next basic block will start with a nonempty stack.
11999 while (sp != stack_start) {
12004 case CEE_LEAVE_S: {
12007 if (*ip == CEE_LEAVE) {
12009 target = ip + 5 + (gint32)read32(ip + 1);
12012 target = ip + 2 + (signed char)(ip [1]);
12015 /* empty the stack */
12016 while (sp != stack_start) {
12021 * If this leave statement is in a catch block, check for a
12022 * pending exception, and rethrow it if necessary.
12023 * We avoid doing this in runtime invoke wrappers, since those are called
12024 * by native code which expects the wrapper to catch all exceptions.
12026 for (i = 0; i < header->num_clauses; ++i) {
12027 MonoExceptionClause *clause = &header->clauses [i];
12030 * Use <= in the final comparison to handle clauses with multiple
12031 * leave statements, like in bug #78024.
12032 * The ordering of the exception clauses guarantees that we find the
12033 * innermost clause.
12035 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12037 MonoBasicBlock *dont_throw;
12042 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12045 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12047 NEW_BBLOCK (cfg, dont_throw);
12050 * Currently, we always rethrow the abort exception, despite the
12051 * fact that this is not correct. See thread6.cs for an example.
12052 * But propagating the abort exception is more important than
12053 * getting the semantics right.
12055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12057 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12059 MONO_START_BB (cfg, dont_throw);
12064 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12067 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12069 MonoExceptionClause *clause;
12071 for (tmp = handlers; tmp; tmp = tmp->next) {
12072 clause = tmp->data;
12073 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12075 link_bblock (cfg, cfg->cbb, tblock);
12076 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12077 ins->inst_target_bb = tblock;
12078 ins->inst_eh_block = clause;
12079 MONO_ADD_INS (cfg->cbb, ins);
12080 cfg->cbb->has_call_handler = 1;
12081 if (COMPILE_LLVM (cfg)) {
12082 MonoBasicBlock *target_bb;
12085 * Link the finally bblock with the target, since it will
12086 * conceptually branch there.
12087 * FIXME: Have to link the bblock containing the endfinally.
12089 GET_BBLOCK (cfg, target_bb, target);
12090 link_bblock (cfg, tblock, target_bb);
12093 g_list_free (handlers);
12096 MONO_INST_NEW (cfg, ins, OP_BR);
12097 MONO_ADD_INS (cfg->cbb, ins);
12098 GET_BBLOCK (cfg, tblock, target);
12099 link_bblock (cfg, cfg->cbb, tblock);
12100 ins->inst_target_bb = tblock;
12102 start_new_bblock = 1;
12104 if (*ip == CEE_LEAVE)
12113 * Mono specific opcodes
12115 case MONO_CUSTOM_PREFIX: {
12117 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12121 case CEE_MONO_ICALL: {
12123 MonoJitICallInfo *info;
12125 token = read32 (ip + 2);
12126 func = mono_method_get_wrapper_data (method, token);
12127 info = mono_find_jit_icall_by_addr (func);
12129 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12132 CHECK_STACK (info->sig->param_count);
12133 sp -= info->sig->param_count;
12135 ins = mono_emit_jit_icall (cfg, info->func, sp);
12136 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12140 inline_costs += 10 * num_calls++;
12144 case CEE_MONO_LDPTR_CARD_TABLE: {
12146 gpointer card_mask;
12147 CHECK_STACK_OVF (1);
12149 if (cfg->compile_aot)
12150 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12152 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12156 inline_costs += 10 * num_calls++;
12159 case CEE_MONO_LDPTR_NURSERY_START: {
12162 CHECK_STACK_OVF (1);
12164 if (cfg->compile_aot)
12165 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12167 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12171 inline_costs += 10 * num_calls++;
12174 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12175 CHECK_STACK_OVF (1);
12177 if (cfg->compile_aot)
12178 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12180 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12184 inline_costs += 10 * num_calls++;
12187 case CEE_MONO_LDPTR: {
12190 CHECK_STACK_OVF (1);
12192 token = read32 (ip + 2);
12194 ptr = mono_method_get_wrapper_data (method, token);
12195 EMIT_NEW_PCONST (cfg, ins, ptr);
12198 inline_costs += 10 * num_calls++;
12199 /* Can't embed random pointers into AOT code */
12203 case CEE_MONO_JIT_ICALL_ADDR: {
12204 MonoJitICallInfo *callinfo;
12207 CHECK_STACK_OVF (1);
12209 token = read32 (ip + 2);
12211 ptr = mono_method_get_wrapper_data (method, token);
12212 callinfo = mono_find_jit_icall_by_addr (ptr);
12213 g_assert (callinfo);
12214 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12217 inline_costs += 10 * num_calls++;
12220 case CEE_MONO_ICALL_ADDR: {
12221 MonoMethod *cmethod;
12224 CHECK_STACK_OVF (1);
12226 token = read32 (ip + 2);
12228 cmethod = mono_method_get_wrapper_data (method, token);
12230 if (cfg->compile_aot) {
12231 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12233 ptr = mono_lookup_internal_call (cmethod);
12235 EMIT_NEW_PCONST (cfg, ins, ptr);
12241 case CEE_MONO_VTADDR: {
12242 MonoInst *src_var, *src;
12248 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12249 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12254 case CEE_MONO_NEWOBJ: {
12255 MonoInst *iargs [2];
12257 CHECK_STACK_OVF (1);
12259 token = read32 (ip + 2);
12260 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12261 mono_class_init (klass);
12262 NEW_DOMAINCONST (cfg, iargs [0]);
12263 MONO_ADD_INS (cfg->cbb, iargs [0]);
12264 NEW_CLASSCONST (cfg, iargs [1], klass);
12265 MONO_ADD_INS (cfg->cbb, iargs [1]);
12266 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12268 inline_costs += 10 * num_calls++;
12271 case CEE_MONO_OBJADDR:
12274 MONO_INST_NEW (cfg, ins, OP_MOVE);
12275 ins->dreg = alloc_ireg_mp (cfg);
12276 ins->sreg1 = sp [0]->dreg;
12277 ins->type = STACK_MP;
12278 MONO_ADD_INS (cfg->cbb, ins);
12282 case CEE_MONO_LDNATIVEOBJ:
12284 * Similar to LDOBJ, but instead load the unmanaged
12285 * representation of the vtype to the stack.
12290 token = read32 (ip + 2);
12291 klass = mono_method_get_wrapper_data (method, token);
12292 g_assert (klass->valuetype);
12293 mono_class_init (klass);
12296 MonoInst *src, *dest, *temp;
12299 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12300 temp->backend.is_pinvoke = 1;
12301 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12302 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12304 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12305 dest->type = STACK_VTYPE;
12306 dest->klass = klass;
12312 case CEE_MONO_RETOBJ: {
12314 * Same as RET, but return the native representation of a vtype
12317 g_assert (cfg->ret);
12318 g_assert (mono_method_signature (method)->pinvoke);
12323 token = read32 (ip + 2);
12324 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12326 if (!cfg->vret_addr) {
12327 g_assert (cfg->ret_var_is_local);
12329 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12331 EMIT_NEW_RETLOADA (cfg, ins);
12333 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12335 if (sp != stack_start)
12338 MONO_INST_NEW (cfg, ins, OP_BR);
12339 ins->inst_target_bb = end_bblock;
12340 MONO_ADD_INS (cfg->cbb, ins);
12341 link_bblock (cfg, cfg->cbb, end_bblock);
12342 start_new_bblock = 1;
12346 case CEE_MONO_CISINST:
12347 case CEE_MONO_CCASTCLASS: {
12352 token = read32 (ip + 2);
12353 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12354 if (ip [1] == CEE_MONO_CISINST)
12355 ins = handle_cisinst (cfg, klass, sp [0]);
12357 ins = handle_ccastclass (cfg, klass, sp [0]);
12362 case CEE_MONO_SAVE_LMF:
12363 case CEE_MONO_RESTORE_LMF:
12366 case CEE_MONO_CLASSCONST:
12367 CHECK_STACK_OVF (1);
12369 token = read32 (ip + 2);
12370 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12373 inline_costs += 10 * num_calls++;
12375 case CEE_MONO_NOT_TAKEN:
12376 cfg->cbb->out_of_line = TRUE;
12379 case CEE_MONO_TLS: {
12382 CHECK_STACK_OVF (1);
12384 key = (gint32)read32 (ip + 2);
12385 g_assert (key < TLS_KEY_NUM);
12387 ins = mono_create_tls_get (cfg, key);
12389 if (cfg->compile_aot) {
12391 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12392 ins->dreg = alloc_preg (cfg);
12393 ins->type = STACK_PTR;
12395 g_assert_not_reached ();
12398 ins->type = STACK_PTR;
12399 MONO_ADD_INS (cfg->cbb, ins);
12404 case CEE_MONO_DYN_CALL: {
12405 MonoCallInst *call;
12407 /* It would be easier to call a trampoline, but that would put an
12408 * extra frame on the stack, confusing exception handling. So
12409 * implement it inline using an opcode for now.
12412 if (!cfg->dyn_call_var) {
12413 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12414 /* prevent it from being register allocated */
12415 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12418 /* Has to use a call inst since the local regalloc expects it */
12419 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12420 ins = (MonoInst*)call;
12422 ins->sreg1 = sp [0]->dreg;
12423 ins->sreg2 = sp [1]->dreg;
12424 MONO_ADD_INS (cfg->cbb, ins);
12426 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12429 inline_costs += 10 * num_calls++;
12433 case CEE_MONO_MEMORY_BARRIER: {
12435 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12439 case CEE_MONO_JIT_ATTACH: {
12440 MonoInst *args [16], *domain_ins;
12441 MonoInst *ad_ins, *jit_tls_ins;
12442 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12444 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12446 EMIT_NEW_PCONST (cfg, ins, NULL);
12447 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12449 ad_ins = mono_get_domain_intrinsic (cfg);
12450 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12452 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12453 NEW_BBLOCK (cfg, next_bb);
12454 NEW_BBLOCK (cfg, call_bb);
12456 if (cfg->compile_aot) {
12457 /* AOT code is only used in the root domain */
12458 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12460 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12462 MONO_ADD_INS (cfg->cbb, ad_ins);
12463 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12464 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12466 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12467 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12471 MONO_START_BB (cfg, call_bb);
12474 if (cfg->compile_aot) {
12475 /* AOT code is only used in the root domain */
12476 EMIT_NEW_PCONST (cfg, args [0], NULL);
12478 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12480 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12481 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12484 MONO_START_BB (cfg, next_bb);
12488 case CEE_MONO_JIT_DETACH: {
12489 MonoInst *args [16];
12491 /* Restore the original domain */
12492 dreg = alloc_ireg (cfg);
12493 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12494 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12499 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12505 case CEE_PREFIX1: {
12508 case CEE_ARGLIST: {
12509 /* somewhat similar to LDTOKEN */
12510 MonoInst *addr, *vtvar;
12511 CHECK_STACK_OVF (1);
12512 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12514 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12515 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12517 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12518 ins->type = STACK_VTYPE;
12519 ins->klass = mono_defaults.argumenthandle_class;
12529 MonoInst *cmp, *arg1, *arg2;
12537 * The following transforms:
12538 * CEE_CEQ into OP_CEQ
12539 * CEE_CGT into OP_CGT
12540 * CEE_CGT_UN into OP_CGT_UN
12541 * CEE_CLT into OP_CLT
12542 * CEE_CLT_UN into OP_CLT_UN
12544 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12546 MONO_INST_NEW (cfg, ins, cmp->opcode);
12547 cmp->sreg1 = arg1->dreg;
12548 cmp->sreg2 = arg2->dreg;
12549 type_from_op (cfg, cmp, arg1, arg2);
12551 add_widen_op (cfg, cmp, &arg1, &arg2);
12552 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12553 cmp->opcode = OP_LCOMPARE;
12554 else if (arg1->type == STACK_R4)
12555 cmp->opcode = OP_RCOMPARE;
12556 else if (arg1->type == STACK_R8)
12557 cmp->opcode = OP_FCOMPARE;
12559 cmp->opcode = OP_ICOMPARE;
12560 MONO_ADD_INS (cfg->cbb, cmp);
12561 ins->type = STACK_I4;
12562 ins->dreg = alloc_dreg (cfg, ins->type);
12563 type_from_op (cfg, ins, arg1, arg2);
12565 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12567 * The backends expect the fceq opcodes to do the
12570 ins->sreg1 = cmp->sreg1;
12571 ins->sreg2 = cmp->sreg2;
12574 MONO_ADD_INS (cfg->cbb, ins);
12580 MonoInst *argconst;
12581 MonoMethod *cil_method;
12583 CHECK_STACK_OVF (1);
12585 n = read32 (ip + 2);
12586 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12587 if (!cmethod || mono_loader_get_last_error ())
12589 mono_class_init (cmethod->klass);
12591 mono_save_token_info (cfg, image, n, cmethod);
12593 context_used = mini_method_check_context_used (cfg, cmethod);
12595 cil_method = cmethod;
12596 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12597 METHOD_ACCESS_FAILURE (method, cil_method);
12599 if (mono_security_core_clr_enabled ())
12600 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12603 * Optimize the common case of ldftn+delegate creation
12605 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12606 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12607 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12608 MonoInst *target_ins, *handle_ins;
12609 MonoMethod *invoke;
12610 int invoke_context_used;
12612 invoke = mono_get_delegate_invoke (ctor_method->klass);
12613 if (!invoke || !mono_method_signature (invoke))
12616 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12618 target_ins = sp [-1];
12620 if (mono_security_core_clr_enabled ())
12621 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12623 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12624 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12625 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12631 /* FIXME: SGEN support */
12632 if (invoke_context_used == 0) {
12634 if (cfg->verbose_level > 3)
12635 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12636 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12639 CHECK_CFG_EXCEPTION;
12649 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12650 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12654 inline_costs += 10 * num_calls++;
12657 case CEE_LDVIRTFTN: {
12658 MonoInst *args [2];
12662 n = read32 (ip + 2);
12663 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12664 if (!cmethod || mono_loader_get_last_error ())
12666 mono_class_init (cmethod->klass);
12668 context_used = mini_method_check_context_used (cfg, cmethod);
12670 if (mono_security_core_clr_enabled ())
12671 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12674 * Optimize the common case of ldvirtftn+delegate creation
12676 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12677 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12678 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12679 MonoInst *target_ins, *handle_ins;
12680 MonoMethod *invoke;
12681 int invoke_context_used;
12682 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12684 invoke = mono_get_delegate_invoke (ctor_method->klass);
12685 if (!invoke || !mono_method_signature (invoke))
12688 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12690 target_ins = sp [-1];
12692 if (mono_security_core_clr_enabled ())
12693 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12695 /* FIXME: SGEN support */
12696 if (invoke_context_used == 0 || cfg->llvm_only) {
12698 if (cfg->verbose_level > 3)
12699 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12700 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12703 CHECK_CFG_EXCEPTION;
12716 args [1] = emit_get_rgctx_method (cfg, context_used,
12717 cmethod, MONO_RGCTX_INFO_METHOD);
12720 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12722 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12725 inline_costs += 10 * num_calls++;
12729 CHECK_STACK_OVF (1);
12731 n = read16 (ip + 2);
12733 EMIT_NEW_ARGLOAD (cfg, ins, n);
12738 CHECK_STACK_OVF (1);
12740 n = read16 (ip + 2);
12742 NEW_ARGLOADA (cfg, ins, n);
12743 MONO_ADD_INS (cfg->cbb, ins);
12751 n = read16 (ip + 2);
12753 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12755 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12759 CHECK_STACK_OVF (1);
12761 n = read16 (ip + 2);
12763 EMIT_NEW_LOCLOAD (cfg, ins, n);
12768 unsigned char *tmp_ip;
12769 CHECK_STACK_OVF (1);
12771 n = read16 (ip + 2);
12774 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12780 EMIT_NEW_LOCLOADA (cfg, ins, n);
12789 n = read16 (ip + 2);
12791 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12793 emit_stloc_ir (cfg, sp, header, n);
12800 if (sp != stack_start)
12802 if (cfg->method != method)
12804 * Inlining this into a loop in a parent could lead to
12805 * stack overflows which is different behavior than the
12806 * non-inlined case, thus disable inlining in this case.
12808 INLINE_FAILURE("localloc");
12810 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12811 ins->dreg = alloc_preg (cfg);
12812 ins->sreg1 = sp [0]->dreg;
12813 ins->type = STACK_PTR;
12814 MONO_ADD_INS (cfg->cbb, ins);
12816 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12818 ins->flags |= MONO_INST_INIT;
12823 case CEE_ENDFILTER: {
12824 MonoExceptionClause *clause, *nearest;
12829 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12831 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12832 ins->sreg1 = (*sp)->dreg;
12833 MONO_ADD_INS (cfg->cbb, ins);
12834 start_new_bblock = 1;
12838 for (cc = 0; cc < header->num_clauses; ++cc) {
12839 clause = &header->clauses [cc];
12840 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12841 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12842 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12845 g_assert (nearest);
12846 if ((ip - header->code) != nearest->handler_offset)
12851 case CEE_UNALIGNED_:
12852 ins_flag |= MONO_INST_UNALIGNED;
12853 /* FIXME: record alignment? we can assume 1 for now */
12857 case CEE_VOLATILE_:
12858 ins_flag |= MONO_INST_VOLATILE;
12862 ins_flag |= MONO_INST_TAILCALL;
12863 cfg->flags |= MONO_CFG_HAS_TAIL;
12864 /* Can't inline tail calls at this time */
12865 inline_costs += 100000;
12872 token = read32 (ip + 2);
12873 klass = mini_get_class (method, token, generic_context);
12874 CHECK_TYPELOAD (klass);
12875 if (generic_class_is_reference_type (cfg, klass))
12876 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12878 mini_emit_initobj (cfg, *sp, NULL, klass);
12882 case CEE_CONSTRAINED_:
12884 token = read32 (ip + 2);
12885 constrained_class = mini_get_class (method, token, generic_context);
12886 CHECK_TYPELOAD (constrained_class);
12890 case CEE_INITBLK: {
12891 MonoInst *iargs [3];
12895 /* Skip optimized paths for volatile operations. */
12896 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12897 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12898 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12899 /* emit_memset only works when val == 0 */
12900 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12903 iargs [0] = sp [0];
12904 iargs [1] = sp [1];
12905 iargs [2] = sp [2];
12906 if (ip [1] == CEE_CPBLK) {
12908 * FIXME: It's unclear whether we should be emitting both the acquire
12909 * and release barriers for cpblk. It is technically both a load and
12910 * store operation, so it seems like that's the sensible thing to do.
12912 * FIXME: We emit full barriers on both sides of the operation for
12913 * simplicity. We should have a separate atomic memcpy method instead.
12915 MonoMethod *memcpy_method = get_memcpy_method ();
12917 if (ins_flag & MONO_INST_VOLATILE)
12918 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12920 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12921 call->flags |= ins_flag;
12923 if (ins_flag & MONO_INST_VOLATILE)
12924 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12926 MonoMethod *memset_method = get_memset_method ();
12927 if (ins_flag & MONO_INST_VOLATILE) {
12928 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12929 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12931 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12932 call->flags |= ins_flag;
12943 ins_flag |= MONO_INST_NOTYPECHECK;
12945 ins_flag |= MONO_INST_NORANGECHECK;
12946 /* we ignore the no-nullcheck for now since we
12947 * really do it explicitly only when doing callvirt->call
12951 case CEE_RETHROW: {
12953 int handler_offset = -1;
12955 for (i = 0; i < header->num_clauses; ++i) {
12956 MonoExceptionClause *clause = &header->clauses [i];
12957 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12958 handler_offset = clause->handler_offset;
12963 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12965 if (handler_offset == -1)
12968 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12969 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12970 ins->sreg1 = load->dreg;
12971 MONO_ADD_INS (cfg->cbb, ins);
12973 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12974 MONO_ADD_INS (cfg->cbb, ins);
12977 link_bblock (cfg, cfg->cbb, end_bblock);
12978 start_new_bblock = 1;
12986 CHECK_STACK_OVF (1);
12988 token = read32 (ip + 2);
12989 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12990 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12993 val = mono_type_size (type, &ialign);
12995 MonoClass *klass = mini_get_class (method, token, generic_context);
12996 CHECK_TYPELOAD (klass);
12998 val = mono_type_size (&klass->byval_arg, &ialign);
13000 if (mini_is_gsharedvt_klass (klass))
13001 GSHAREDVT_FAILURE (*ip);
13003 EMIT_NEW_ICONST (cfg, ins, val);
13008 case CEE_REFANYTYPE: {
13009 MonoInst *src_var, *src;
13011 GSHAREDVT_FAILURE (*ip);
13017 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13019 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13020 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13026 case CEE_READONLY_:
13039 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13049 g_warning ("opcode 0x%02x not handled", *ip);
13053 if (start_new_bblock != 1)
13056 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13057 if (cfg->cbb->next_bb) {
13058 /* This could already be set because of inlining, #693905 */
13059 MonoBasicBlock *bb = cfg->cbb;
13061 while (bb->next_bb)
13063 bb->next_bb = end_bblock;
13065 cfg->cbb->next_bb = end_bblock;
13068 if (cfg->method == method && cfg->domainvar) {
13070 MonoInst *get_domain;
13072 cfg->cbb = init_localsbb;
13074 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13075 MONO_ADD_INS (cfg->cbb, get_domain);
13077 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13079 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13080 MONO_ADD_INS (cfg->cbb, store);
13083 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13084 if (cfg->compile_aot)
13085 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13086 mono_get_got_var (cfg);
13089 if (cfg->method == method && cfg->got_var)
13090 mono_emit_load_got_addr (cfg);
13092 if (init_localsbb) {
13093 cfg->cbb = init_localsbb;
13095 for (i = 0; i < header->num_locals; ++i) {
13096 emit_init_local (cfg, i, header->locals [i], init_locals);
13100 if (cfg->init_ref_vars && cfg->method == method) {
13101 /* Emit initialization for ref vars */
13102 // FIXME: Avoid duplication initialization for IL locals.
13103 for (i = 0; i < cfg->num_varinfo; ++i) {
13104 MonoInst *ins = cfg->varinfo [i];
13106 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13107 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13111 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13112 cfg->cbb = init_localsbb;
13113 emit_push_lmf (cfg);
13116 cfg->cbb = init_localsbb;
13117 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13120 MonoBasicBlock *bb;
13123 * Make seq points at backward branch targets interruptable.
13125 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13126 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13127 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13130 /* Add a sequence point for method entry/exit events */
13131 if (seq_points && cfg->gen_sdb_seq_points) {
13132 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13133 MONO_ADD_INS (init_localsbb, ins);
13134 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13135 MONO_ADD_INS (cfg->bb_exit, ins);
13139 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13140 * the code they refer to was dead (#11880).
13142 if (sym_seq_points) {
13143 for (i = 0; i < header->code_size; ++i) {
13144 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13147 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13148 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13155 if (cfg->method == method) {
13156 MonoBasicBlock *bb;
13157 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13158 bb->region = mono_find_block_region (cfg, bb->real_offset);
13160 mono_create_spvar_for_region (cfg, bb->region);
13161 if (cfg->verbose_level > 2)
13162 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13166 if (inline_costs < 0) {
13169 /* Method is too large */
13170 mname = mono_method_full_name (method, TRUE);
13171 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13172 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13176 if ((cfg->verbose_level > 2) && (cfg->method == method))
13177 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13182 g_assert (!mono_error_ok (&cfg->error));
13186 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13190 set_exception_type_from_invalid_il (cfg, method, ip);
13194 g_slist_free (class_inits);
13195 mono_basic_block_free (original_bb);
13196 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13197 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13198 if (cfg->exception_type)
13201 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode (store a register to [base + offset])
 * to its *_MEMBASE_IMM counterpart (store an immediate), used when the
 * value being stored is known to be a constant. Asserts if the opcode
 * has no immediate form.
 */
13205 store_membase_reg_to_store_membase_imm (int opcode)
13208 case OP_STORE_MEMBASE_REG:
13209 return OP_STORE_MEMBASE_IMM;
13210 case OP_STOREI1_MEMBASE_REG:
13211 return OP_STOREI1_MEMBASE_IMM;
13212 case OP_STOREI2_MEMBASE_REG:
13213 return OP_STOREI2_MEMBASE_IMM;
13214 case OP_STOREI4_MEMBASE_REG:
13215 return OP_STOREI4_MEMBASE_IMM;
13216 case OP_STOREI8_MEMBASE_REG:
13217 return OP_STOREI8_MEMBASE_IMM;
/* Caller must only pass opcodes handled above. */
13219 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to the variant taking an immediate second
 * operand, when one exists (integer/long ALU and shift ops, compares,
 * membase stores, and some x86/amd64-specific ops). Used by the local
 * optimizer to fold constant operands into instructions.
 */
13226 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU / shift ops */
13230 return OP_IADD_IMM;
13232 return OP_ISUB_IMM;
13234 return OP_IDIV_IMM;
13236 return OP_IDIV_UN_IMM;
13238 return OP_IREM_IMM;
13240 return OP_IREM_UN_IMM;
13242 return OP_IMUL_IMM;
13244 return OP_IAND_IMM;
13248 return OP_IXOR_IMM;
13250 return OP_ISHL_IMM;
13252 return OP_ISHR_IMM;
13254 return OP_ISHR_UN_IMM;
/* 64 bit long ops */
13257 return OP_LADD_IMM;
13259 return OP_LSUB_IMM;
13261 return OP_LAND_IMM;
13265 return OP_LXOR_IMM;
13267 return OP_LSHL_IMM;
13269 return OP_LSHR_IMM;
13271 return OP_LSHR_UN_IMM;
/* LREM_IMM only exists on 64 bit targets */
13272 #if SIZEOF_REGISTER == 8
13274 return OP_LREM_IMM;
/* Compares */
13278 return OP_COMPARE_IMM;
13280 return OP_ICOMPARE_IMM;
13282 return OP_LCOMPARE_IMM;
/* Membase stores */
13284 case OP_STORE_MEMBASE_REG:
13285 return OP_STORE_MEMBASE_IMM;
13286 case OP_STOREI1_MEMBASE_REG:
13287 return OP_STOREI1_MEMBASE_IMM;
13288 case OP_STOREI2_MEMBASE_REG:
13289 return OP_STOREI2_MEMBASE_IMM;
13290 case OP_STOREI4_MEMBASE_REG:
13291 return OP_STOREI4_MEMBASE_IMM;
/* Arch specific immediate forms */
13293 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13295 return OP_X86_PUSH_IMM;
13296 case OP_X86_COMPARE_MEMBASE_REG:
13297 return OP_X86_COMPARE_MEMBASE_IMM;
13299 #if defined(TARGET_AMD64)
13300 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13301 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13303 case OP_VOIDCALL_REG:
13304 return OP_VOIDCALL;
13312 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode (indirect load through a pointer on the
 * evaluation stack) to the corresponding OP_LOAD*_MEMBASE IR opcode.
 * Asserts on unhandled opcodes.
 */
13319 ldind_to_load_membase (int opcode)
13323 return OP_LOADI1_MEMBASE;
13325 return OP_LOADU1_MEMBASE;
13327 return OP_LOADI2_MEMBASE;
13329 return OP_LOADU2_MEMBASE;
13331 return OP_LOADI4_MEMBASE;
13333 return OP_LOADU4_MEMBASE;
13335 return OP_LOAD_MEMBASE;
/* Object references are loaded with the pointer-sized load */
13336 case CEE_LDIND_REF:
13337 return OP_LOAD_MEMBASE;
13339 return OP_LOADI8_MEMBASE;
13341 return OP_LOADR4_MEMBASE;
13343 return OP_LOADR8_MEMBASE;
13345 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode (indirect store through a pointer on the
 * evaluation stack) to the corresponding OP_STORE*_MEMBASE_REG IR opcode.
 * Asserts on unhandled opcodes.
 */
13352 stind_to_store_membase (int opcode)
13356 return OP_STOREI1_MEMBASE_REG;
13358 return OP_STOREI2_MEMBASE_REG;
13360 return OP_STOREI4_MEMBASE_REG;
/* Object references use the pointer-sized store */
13362 case CEE_STIND_REF:
13363 return OP_STORE_MEMBASE_REG;
13365 return OP_STOREI8_MEMBASE_REG;
13367 return OP_STORER4_MEMBASE_REG;
13369 return OP_STORER8_MEMBASE_REG;
13371 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from [basereg + offset]) to the
 * corresponding OP_LOAD*_MEM opcode (load from an absolute address),
 * used when the base address is a known constant. Only implemented for
 * x86/amd64; see the FIXME below about introducing a capability macro
 * instead of hard-coding the targets.
 */
13378 mono_load_membase_to_load_mem (int opcode)
13380 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13381 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13383 case OP_LOAD_MEMBASE:
13384 return OP_LOAD_MEM;
13385 case OP_LOADU1_MEMBASE:
13386 return OP_LOADU1_MEM;
13387 case OP_LOADU2_MEMBASE:
13388 return OP_LOADU2_MEM;
13389 case OP_LOADI4_MEMBASE:
13390 return OP_LOADI4_MEM;
13391 case OP_LOADU4_MEMBASE:
13392 return OP_LOADU4_MEM;
/* 64 bit loads only have an absolute-address form on 64 bit targets */
13393 #if SIZEOF_REGISTER == 8
13394 case OP_LOADI8_MEMBASE:
13395 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored via
 * STORE_OPCODE, return an x86/amd64 read-modify-write opcode that
 * operates directly on the memory destination ([basereg + offset]),
 * fusing the ALU op and the store. Only register-sized and 32/64 bit
 * stores qualify; the pattern-matching caller keeps the original
 * instructions when no fused form exists.
 */
13404 op_to_op_dest_membase (int store_opcode, int opcode)
13406 #if defined(TARGET_X86)
/* On x86 only pointer-sized and 32 bit stores can be fused */
13407 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13412 return OP_X86_ADD_MEMBASE_REG;
13414 return OP_X86_SUB_MEMBASE_REG;
13416 return OP_X86_AND_MEMBASE_REG;
13418 return OP_X86_OR_MEMBASE_REG;
13420 return OP_X86_XOR_MEMBASE_REG;
13423 return OP_X86_ADD_MEMBASE_IMM;
13426 return OP_X86_SUB_MEMBASE_IMM;
13429 return OP_X86_AND_MEMBASE_IMM;
13432 return OP_X86_OR_MEMBASE_IMM;
13435 return OP_X86_XOR_MEMBASE_IMM;
13441 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64 bit stores */
13442 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13447 return OP_X86_ADD_MEMBASE_REG;
13449 return OP_X86_SUB_MEMBASE_REG;
13451 return OP_X86_AND_MEMBASE_REG;
13453 return OP_X86_OR_MEMBASE_REG;
13455 return OP_X86_XOR_MEMBASE_REG;
13457 return OP_X86_ADD_MEMBASE_IMM;
13459 return OP_X86_SUB_MEMBASE_IMM;
13461 return OP_X86_AND_MEMBASE_IMM;
13463 return OP_X86_OR_MEMBASE_IMM;
13465 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit (long) variants */
13467 return OP_AMD64_ADD_MEMBASE_REG;
13469 return OP_AMD64_SUB_MEMBASE_REG;
13471 return OP_AMD64_AND_MEMBASE_REG;
13473 return OP_AMD64_OR_MEMBASE_REG;
13475 return OP_AMD64_XOR_MEMBASE_REG;
13478 return OP_AMD64_ADD_MEMBASE_IMM;
13481 return OP_AMD64_SUB_MEMBASE_IMM;
13484 return OP_AMD64_AND_MEMBASE_IMM;
13487 return OP_AMD64_OR_MEMBASE_IMM;
13490 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a SETcc-style compare result with the byte store that consumes
 * it: on x86/amd64, an OP_STOREI1_MEMBASE_REG of a CEQ/CNE result can be
 * emitted as a single SETcc-to-memory instruction.
 */
13500 op_to_op_store_membase (int store_opcode, int opcode)
13502 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc only produces a byte, so only a 1 byte store can be fused */
13505 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13506 return OP_X86_SETEQ_MEMBASE;
13508 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13509 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand comes directly from a
 * preceding LOAD_OPCODE, return an x86/amd64 opcode that reads that
 * operand from memory, fusing the load into the instruction. Returns
 * the original behavior (fall through) when no fused form applies.
 * On amd64, cfg->backend->ilp32 distinguishes 32 bit pointer loads
 * (x32 ABI) from full 64 bit loads.
 */
13517 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13520 /* FIXME: This has sign extension issues */
13522 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13523 return OP_X86_COMPARE_MEMBASE8_IMM;
/* On x86 only pointer-sized / 32 bit loads can be fused */
13526 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13531 return OP_X86_PUSH_MEMBASE;
13532 case OP_COMPARE_IMM:
13533 case OP_ICOMPARE_IMM:
13534 return OP_X86_COMPARE_MEMBASE_IMM;
13537 return OP_X86_COMPARE_MEMBASE_REG;
13541 #ifdef TARGET_AMD64
13542 /* FIXME: This has sign extension issues */
13544 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13545 return OP_X86_COMPARE_MEMBASE8_IMM;
13550 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13551 return OP_X86_PUSH_MEMBASE;
/* The COMPARE_IMM cases below are deliberately commented out; see the
 * FIXME: they are only correct for 32 bit immediates. */
13553 /* FIXME: This only works for 32 bit immediates
13554 case OP_COMPARE_IMM:
13555 case OP_LCOMPARE_IMM:
13556 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13557 return OP_AMD64_COMPARE_MEMBASE_IMM;
13559 case OP_ICOMPARE_IMM:
13560 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13561 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13565 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13566 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13567 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13568 return OP_AMD64_COMPARE_MEMBASE_REG;
13571 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13572 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase, but for the SECOND source operand:
 * fuse a preceding LOAD_OPCODE feeding sreg2 into a reg-memory form of
 * the instruction on x86/amd64. The amd64 branch picks 32 bit (I...)
 * or 64 bit (AMD64_...) variants based on the load width and the
 * ilp32 (x32) backend flag.
 */
13581 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be fused */
13584 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13590 return OP_X86_COMPARE_REG_MEMBASE;
13592 return OP_X86_ADD_REG_MEMBASE;
13594 return OP_X86_SUB_REG_MEMBASE;
13596 return OP_X86_AND_REG_MEMBASE;
13598 return OP_X86_OR_REG_MEMBASE;
13600 return OP_X86_XOR_REG_MEMBASE;
13604 #ifdef TARGET_AMD64
/* 32 bit operand width */
13605 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13608 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13610 return OP_X86_ADD_REG_MEMBASE;
13612 return OP_X86_SUB_REG_MEMBASE;
13614 return OP_X86_AND_REG_MEMBASE;
13616 return OP_X86_OR_REG_MEMBASE;
13618 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand width */
13620 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13624 return OP_AMD64_COMPARE_REG_MEMBASE;
13626 return OP_AMD64_ADD_REG_MEMBASE;
13628 return OP_AMD64_SUB_REG_MEMBASE;
13630 return OP_AMD64_AND_REG_MEMBASE;
13632 return OP_AMD64_OR_REG_MEMBASE;
13634 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuse to convert opcodes that the
 * current architecture implements via software emulation (long shifts
 * on 32 bit targets, and mul/div/rem where the arch emulates them) —
 * the emulation helpers have no immediate-operand entry points.
 */
13643 mono_op_to_op_imm_noemul (int opcode)
13646 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13652 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13659 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Not emulated on this target: safe to use the immediate form */
13664 return mono_op_to_op_imm (opcode);
13669 * mono_handle_global_vregs:
13671 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as visible here):
 *   1) Walk every bblock and record, per vreg, the single bblock that
 *      uses it (block_num + 1) or -1 once a second bblock is seen; vregs
 *      used across bblocks get a MonoInst variable created for them.
 *   2) Variables confined to one bblock are demoted back to local vregs
 *      (flagged MONO_INST_IS_DEAD), with an AOT-only check that their
 *      live range does not cross a call (which would force a spill).
 *   3) Compress cfg->varinfo / cfg->vars to drop the dead entries.
 */
13675 mono_handle_global_vregs (MonoCompile *cfg)
13677 gint32 *vreg_to_bb;
13678 MonoBasicBlock *bb;
/* NOTE(review): element size is sizeof (gint32*) for a gint32 array, and
 * "* cfg->next_vreg + 1" adds one BYTE, not one element, due to precedence.
 * Harmless (over-allocates on 64 bit) but should be
 * sizeof (gint32) * (cfg->next_vreg + 1) — confirm against upstream. */
13681 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13683 #ifdef MONO_ARCH_SIMD_INTRINSICS
13684 if (cfg->uses_simd_intrinsics)
13685 mono_simd_simplify_indirection (cfg);
13688 /* Find local vregs used in more than one bb */
13689 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13690 MonoInst *ins = bb->code;
13691 int block_num = bb->block_num;
13693 if (cfg->verbose_level > 2)
13694 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13697 for (; ins; ins = ins->next) {
13698 const char *spec = INS_INFO (ins->opcode);
13699 int regtype = 0, regindex;
13702 if (G_UNLIKELY (cfg->verbose_level > 2))
13703 mono_print_ins (ins);
/* CIL-level opcodes must have been lowered to machine IR by now */
13705 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (0), sreg1 (1), sreg2 (2), sreg3 (3) in turn */
13707 for (regindex = 0; regindex < 4; regindex ++) {
13710 if (regindex == 0) {
13711 regtype = spec [MONO_INST_DEST];
13712 if (regtype == ' ')
13715 } else if (regindex == 1) {
13716 regtype = spec [MONO_INST_SRC1];
13717 if (regtype == ' ')
13720 } else if (regindex == 2) {
13721 regtype = spec [MONO_INST_SRC2];
13722 if (regtype == ' ')
13725 } else if (regindex == 3) {
13726 regtype = spec [MONO_INST_SRC3];
13727 if (regtype == ' ')
13732 #if SIZEOF_REGISTER == 4
13733 /* In the LLVM case, the long opcodes are not decomposed */
13734 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13736 * Since some instructions reference the original long vreg,
13737 * and some reference the two component vregs, it is quite hard
13738 * to determine when it needs to be global. So be conservative.
13740 if (!get_vreg_to_inst (cfg, vreg)) {
13741 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13743 if (cfg->verbose_level > 2)
13744 printf ("LONG VREG R%d made global.\n", vreg);
13748 * Make the component vregs volatile since the optimizations can
13749 * get confused otherwise.
13751 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13752 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13756 g_assert (vreg != -1);
13758 prev_bb = vreg_to_bb [vreg];
13759 if (prev_bb == 0) {
13760 /* 0 is a valid block num */
13761 vreg_to_bb [vreg] = block_num + 1;
13762 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them */
13763 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13766 if (!get_vreg_to_inst (cfg, vreg)) {
13767 if (G_UNLIKELY (cfg->verbose_level > 2))
13768 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the register class */
13772 if (vreg_is_ref (cfg, vreg))
13773 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13775 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13778 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13781 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13784 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13787 g_assert_not_reached ();
13791 /* Flag as having been used in more than one bb */
13792 vreg_to_bb [vreg] = -1;
13798 /* If a variable is used in only one bblock, convert it into a local vreg */
13799 for (i = 0; i < cfg->num_varinfo; i++) {
13800 MonoInst *var = cfg->varinfo [i];
13801 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13803 switch (var->type) {
13809 #if SIZEOF_REGISTER == 8
13812 #if !defined(TARGET_X86)
13813 /* Enabling this screws up the fp stack on x86 */
13816 if (mono_arch_is_soft_float ())
13819 /* Arguments are implicitly global */
13820 /* Putting R4 vars into registers doesn't work currently */
13821 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13822 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13824 * Make that the variable's liveness interval doesn't contain a call, since
13825 * that would cause the lvreg to be spilled, making the whole optimization
13828 /* This is too slow for JIT compilation */
13830 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13832 int def_index, call_index, ins_index;
13833 gboolean spilled = FALSE;
13838 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13839 const char *spec = INS_INFO (ins->opcode);
13841 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13842 def_index = ins_index;
/* NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * was presumably meant to check SRC2/sreg2, so uses of the variable
 * as a second source operand are never examined here. Confirm against
 * upstream before changing; cannot safely fix in this elided view. */
13844 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13845 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13846 if (call_index > def_index) {
13852 if (MONO_IS_CALL (ins))
13853 call_index = ins_index;
13863 if (G_UNLIKELY (cfg->verbose_level > 2))
13864 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: drop the variable, keep only the local vreg */
13865 var->flags |= MONO_INST_IS_DEAD;
13866 cfg->vreg_to_inst [var->dreg] = NULL;
13873 * Compress the varinfo and vars tables so the liveness computation is faster and
13874 * takes up less space.
13877 for (i = 0; i < cfg->num_varinfo; ++i) {
13878 MonoInst *var = cfg->varinfo [i];
13879 if (pos < i && cfg->locals_start == i)
13880 cfg->locals_start = pos;
13881 if (!(var->flags & MONO_INST_IS_DEAD)) {
13883 cfg->varinfo [pos] = cfg->varinfo [i];
13884 cfg->varinfo [pos]->inst_c0 = pos;
13885 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13886 cfg->vars [pos].idx = pos;
13887 #if SIZEOF_REGISTER == 4
13888 if (cfg->varinfo [pos]->type == STACK_I8) {
13889 /* Modify the two component vars too */
13892 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13893 var1->inst_c0 = pos;
13894 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13895 var1->inst_c0 = pos;
13902 cfg->num_varinfo = pos;
13903 if (cfg->locals_start > cfg->num_varinfo)
13904 cfg->locals_start = cfg->num_varinfo;
13908 * mono_spill_global_vars:
13910 * Generate spill code for variables which are not allocated to registers,
13911 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13912 * code is generated which could be optimized by the local optimization passes.
13915 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13917 MonoBasicBlock *bb;
/* NOTE(review): several locals used below (spec2, lvregs, lvregs_len, store_opcode,
 * lvreg, no_lvreg, tree, idx, ...) are declared on lines elided from this listing. */
13919 int orig_next_vreg;
13920 guint32 *vreg_to_lvreg;
13922 guint32 i, lvregs_len;
13923 gboolean dest_has_lvreg = FALSE;
13924 guint32 stacktypes [128];
13925 MonoInst **live_range_start, **live_range_end;
13926 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13927 int *gsharedvt_vreg_to_idx = NULL;
13929 *need_local_opts = FALSE;
13931 memset (spec2, 0, sizeof (spec2));
/* Map ins-spec type characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg (). */
13933 /* FIXME: Move this function to mini.c */
13934 stacktypes ['i'] = STACK_PTR;
13935 stacktypes ['l'] = STACK_I8;
13936 stacktypes ['f'] = STACK_R8;
13937 #ifdef MONO_ARCH_SIMD_INTRINSICS
13938 stacktypes ['x'] = STACK_VTYPE;
/* On 32-bit targets a long variable occupies two vregs: dreg + 1 holds the LS word,
 * dreg + 2 the MS word. Give each half its own OP_REGOFFSET slot description so the
 * per-word loads/stores emitted later resolve to the right offsets. */
13941 #if SIZEOF_REGISTER == 4
13942 /* Create MonoInsts for longs */
13943 for (i = 0; i < cfg->num_varinfo; i++) {
13944 MonoInst *ins = cfg->varinfo [i];
13946 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13947 switch (ins->type) {
13952 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13955 g_assert (ins->opcode == OP_REGOFFSET);
13957 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13959 tree->opcode = OP_REGOFFSET;
13960 tree->inst_basereg = ins->inst_basereg;
13961 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13963 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13965 tree->opcode = OP_REGOFFSET;
13966 tree->inst_basereg = ins->inst_basereg;
13967 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13977 if (cfg->compute_gc_maps) {
13978 /* registers need liveness info even for !non refs */
13979 for (i = 0; i < cfg->num_varinfo; i++) {
13980 MonoInst *ins = cfg->varinfo [i];
13982 if (ins->opcode == OP_REGVAR)
13983 ins->flags |= MONO_INST_GC_TRACK;
/* Map gsharedvt variables to runtime-info slot indexes: locals are stored as idx + 1,
 * by-ref arguments as -1, so 0 in gsharedvt_vreg_to_idx means "not a gsharedvt var". */
13987 if (cfg->gsharedvt) {
13988 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13990 for (i = 0; i < cfg->num_varinfo; ++i) {
13991 MonoInst *ins = cfg->varinfo [i];
13994 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13995 if (i >= cfg->locals_start) {
13997 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13998 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13999 ins->opcode = OP_GSHAREDVT_LOCAL;
14000 ins->inst_imm = idx;
14003 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14004 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14010 /* FIXME: widening and truncation */
14013 * As an optimization, when a variable allocated to the stack is first loaded into
14014 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14015 * the variable again.
/* The vreg -> lvreg cache is only valid within a single bblock; it is reset at the
 * start of each bblock and flushed at every call (see MONO_IS_CALL below). */
14017 orig_next_vreg = cfg->next_vreg;
14018 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14019 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14023 * These arrays contain the first and last instructions accessing a given
14025 * Since we emit bblocks in the same order we process them here, and we
14026 * don't split live ranges, these will precisely describe the live range of
14027 * the variable, i.e. the instruction range where a valid value can be found
14028 * in the variables location.
14029 * The live range is computed using the liveness info computed by the liveness pass.
14030 * We can't use vmv->range, since that is an abstract live range, and we need
14031 * one which is instruction precise.
14032 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14034 /* FIXME: Only do this if debugging info is requested */
14035 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
14036 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14037 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14038 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Main pass: walk every instruction of every bblock, rewriting defs of global vregs
 * into stores to their stack slots and uses into loads (or fused _membase forms). */
14040 /* Add spill loads/stores */
14041 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14044 if (cfg->verbose_level > 2)
14045 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14047 /* Clear vreg_to_lvreg array */
14048 for (i = 0; i < lvregs_len; i++)
14049 vreg_to_lvreg [lvregs [i]] = 0;
14053 MONO_BB_FOR_EACH_INS (bb, ins) {
14054 const char *spec = INS_INFO (ins->opcode);
14055 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14056 gboolean store, no_lvreg;
14057 int sregs [MONO_MAX_SRC_REGS];
14059 if (G_UNLIKELY (cfg->verbose_level > 2))
14060 mono_print_ins (ins);
14062 if (ins->opcode == OP_NOP)
14066 * We handle LDADDR here as well, since it can only be decomposed
14067 * when variable addresses are known.
14069 if (ins->opcode == OP_LDADDR) {
14070 MonoInst *var = ins->inst_p0;
14072 if (var->opcode == OP_VTARG_ADDR) {
14073 /* Happens on SPARC/S390 where vtypes are passed by reference */
14074 MonoInst *vtaddr = var->inst_left;
14075 if (vtaddr->opcode == OP_REGVAR) {
14076 ins->opcode = OP_MOVE;
14077 ins->sreg1 = vtaddr->dreg;
14079 else if (var->inst_left->opcode == OP_REGOFFSET) {
14080 ins->opcode = OP_LOAD_MEMBASE;
14081 ins->inst_basereg = vtaddr->inst_basereg;
14082 ins->inst_offset = vtaddr->inst_offset;
14085 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14086 /* gsharedvt arg passed by ref */
14087 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14089 ins->opcode = OP_LOAD_MEMBASE;
14090 ins->inst_basereg = var->inst_basereg;
14091 ins->inst_offset = var->inst_offset;
14092 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14093 MonoInst *load, *load2, *load3;
14094 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14095 int reg1, reg2, reg3;
14096 MonoInst *info_var = cfg->gsharedvt_info_var;
14097 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14101 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14104 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14106 g_assert (info_var);
14107 g_assert (locals_var);
14109 /* Mark the instruction used to compute the locals var as used */
14110 cfg->gsharedvt_locals_var_ins = NULL;
14112 /* Load the offset */
14113 if (info_var->opcode == OP_REGOFFSET) {
14114 reg1 = alloc_ireg (cfg);
14115 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14116 } else if (info_var->opcode == OP_REGVAR) {
14118 reg1 = info_var->dreg;
14120 g_assert_not_reached ();
14122 reg2 = alloc_ireg (cfg);
14123 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14124 /* Load the locals area address */
14125 reg3 = alloc_ireg (cfg);
14126 if (locals_var->opcode == OP_REGOFFSET) {
14127 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14128 } else if (locals_var->opcode == OP_REGVAR) {
14129 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14131 g_assert_not_reached ();
14133 /* Compute the address */
14134 ins->opcode = OP_PADD;
/* Insert in reverse so the final order before ins is: load, load2, load3. */
14138 mono_bblock_insert_before_ins (bb, ins, load3);
14139 mono_bblock_insert_before_ins (bb, load3, load2);
14141 mono_bblock_insert_before_ins (bb, load2, load);
14143 g_assert (var->opcode == OP_REGOFFSET);
14145 ins->opcode = OP_ADD_IMM;
14146 ins->sreg1 = var->inst_basereg;
14147 ins->inst_imm = var->inst_offset;
14150 *need_local_opts = TRUE;
14151 spec = INS_INFO (ins->opcode);
/* By this point no IR-level (CEE_*) opcodes should remain; everything must be lowered. */
14154 if (ins->opcode < MONO_CEE_LAST) {
14155 mono_print_ins (ins);
14156 g_assert_not_reached ();
14160 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 (and patch the spec accordingly via spec2) so stores
 * can be processed like ordinary defs; the swap is undone further down. */
14164 if (MONO_IS_STORE_MEMBASE (ins)) {
14165 tmp_reg = ins->dreg;
14166 ins->dreg = ins->sreg2;
14167 ins->sreg2 = tmp_reg;
14170 spec2 [MONO_INST_DEST] = ' ';
14171 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14172 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14173 spec2 [MONO_INST_SRC3] = ' ';
14175 } else if (MONO_IS_STORE_MEMINDEX (ins))
14176 g_assert_not_reached ();
14181 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14182 printf ("\t %.3s %d", spec, ins->dreg);
14183 num_sregs = mono_inst_get_src_registers (ins, sregs);
14184 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14185 printf (" %d", sregs [srcindex]);
/* DREG handling: if this instruction defines a global vreg, either rename the dreg
 * to the allocated hreg (OP_REGVAR) or spill the result to the variable's stack slot. */
14192 regtype = spec [MONO_INST_DEST];
14193 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14196 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14197 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14198 MonoInst *store_ins;
14200 MonoInst *def_ins = ins;
14201 int dreg = ins->dreg; /* The original vreg */
14203 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14205 if (var->opcode == OP_REGVAR) {
14206 ins->dreg = var->dreg;
14207 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14209 * Instead of emitting a load+store, use a _membase opcode.
14211 g_assert (var->opcode == OP_REGOFFSET);
14212 if (ins->opcode == OP_MOVE) {
14216 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14217 ins->inst_basereg = var->inst_basereg;
14218 ins->inst_offset = var->inst_offset;
14221 spec = INS_INFO (ins->opcode);
14225 g_assert (var->opcode == OP_REGOFFSET);
14227 prev_dreg = ins->dreg;
14229 /* Invalidate any previous lvreg for this vreg */
14230 vreg_to_lvreg [ins->dreg] = 0;
14234 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14236 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh local vreg; a store to the stack slot follows it. */
14239 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14241 #if SIZEOF_REGISTER != 8
14242 if (regtype == 'l') {
14243 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14244 mono_bblock_insert_after_ins (bb, ins, store_ins);
14245 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14246 mono_bblock_insert_after_ins (bb, ins, store_ins);
14247 def_ins = store_ins;
14252 g_assert (store_opcode != OP_STOREV_MEMBASE);
14254 /* Try to fuse the store into the instruction itself */
14255 /* FIXME: Add more instructions */
14256 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14257 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14258 ins->inst_imm = ins->inst_c0;
14259 ins->inst_destbasereg = var->inst_basereg;
14260 ins->inst_offset = var->inst_offset;
14261 spec = INS_INFO (ins->opcode);
14262 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14263 ins->opcode = store_opcode;
14264 ins->inst_destbasereg = var->inst_basereg;
14265 ins->inst_offset = var->inst_offset;
/* The move became a store: swap dreg/sreg2 and rebuild spec2, same as above. */
14269 tmp_reg = ins->dreg;
14270 ins->dreg = ins->sreg2;
14271 ins->sreg2 = tmp_reg;
14274 spec2 [MONO_INST_DEST] = ' ';
14275 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14276 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14277 spec2 [MONO_INST_SRC3] = ' ';
14279 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14280 // FIXME: The backends expect the base reg to be in inst_basereg
14281 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14283 ins->inst_basereg = var->inst_basereg;
14284 ins->inst_offset = var->inst_offset;
14285 spec = INS_INFO (ins->opcode);
14287 /* printf ("INS: "); mono_print_ins (ins); */
14288 /* Create a store instruction */
14289 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14291 /* Insert it after the instruction */
14292 mono_bblock_insert_after_ins (bb, ins, store_ins);
14294 def_ins = store_ins;
14297 * We can't assign ins->dreg to var->dreg here, since the
14298 * sregs could use it. So set a flag, and do it after
14301 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14302 dest_has_lvreg = TRUE;
14307 if (def_ins && !live_range_start [dreg]) {
14308 live_range_start [dreg] = def_ins;
14309 live_range_start_bb [dreg] = bb;
14312 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14315 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14316 tmp->inst_c1 = dreg;
14317 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREG handling: replace each global-vreg source with its hreg, a cached lvreg,
 * a fused _membase form, or an explicit load inserted before the instruction. */
14324 num_sregs = mono_inst_get_src_registers (ins, sregs);
14325 for (srcindex = 0; srcindex < 3; ++srcindex) {
14326 regtype = spec [MONO_INST_SRC1 + srcindex];
14327 sreg = sregs [srcindex];
14329 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14330 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14331 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14332 MonoInst *use_ins = ins;
14333 MonoInst *load_ins;
14334 guint32 load_opcode;
14336 if (var->opcode == OP_REGVAR) {
14337 sregs [srcindex] = var->dreg;
14338 //mono_inst_set_src_registers (ins, sregs);
14339 live_range_end [sreg] = use_ins;
14340 live_range_end_bb [sreg] = bb;
14342 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14345 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14346 /* var->dreg is a hreg */
14347 tmp->inst_c1 = sreg;
14348 mono_bblock_insert_after_ins (bb, ins, tmp);
14354 g_assert (var->opcode == OP_REGOFFSET);
14356 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14358 g_assert (load_opcode != OP_LOADV_MEMBASE);
14360 if (vreg_to_lvreg [sreg]) {
14361 g_assert (vreg_to_lvreg [sreg] != -1);
14363 /* The variable is already loaded to an lvreg */
14364 if (G_UNLIKELY (cfg->verbose_level > 2))
14365 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14366 sregs [srcindex] = vreg_to_lvreg [sreg];
14367 //mono_inst_set_src_registers (ins, sregs);
14371 /* Try to fuse the load into the instruction */
14372 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14373 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14374 sregs [0] = var->inst_basereg;
14375 //mono_inst_set_src_registers (ins, sregs);
14376 ins->inst_offset = var->inst_offset;
14377 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14378 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14379 sregs [1] = var->inst_basereg;
14380 //mono_inst_set_src_registers (ins, sregs);
14381 ins->inst_offset = var->inst_offset;
/* A move whose source got materialized elsewhere degenerates to a NOP. */
14383 if (MONO_IS_REAL_MOVE (ins)) {
14384 ins->opcode = OP_NOP;
14387 //printf ("%d ", srcindex); mono_print_ins (ins);
14389 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14391 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14392 if (var->dreg == prev_dreg) {
14394 * sreg refers to the value loaded by the load
14395 * emitted below, but we need to use ins->dreg
14396 * since it refers to the store emitted earlier.
14400 g_assert (sreg != -1);
14401 vreg_to_lvreg [var->dreg] = sreg;
14402 g_assert (lvregs_len < 1024);
14403 lvregs [lvregs_len ++] = var->dreg;
14407 sregs [srcindex] = sreg;
14408 //mono_inst_set_src_registers (ins, sregs);
14410 #if SIZEOF_REGISTER != 8
14411 if (regtype == 'l') {
14412 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14413 mono_bblock_insert_before_ins (bb, ins, load_ins);
14414 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14415 mono_bblock_insert_before_ins (bb, ins, load_ins);
14416 use_ins = load_ins;
14421 #if SIZEOF_REGISTER == 4
14422 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14424 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14425 mono_bblock_insert_before_ins (bb, ins, load_ins);
14426 use_ins = load_ins;
14430 if (var->dreg < orig_next_vreg) {
14431 live_range_end [var->dreg] = use_ins;
14432 live_range_end_bb [var->dreg] = bb;
14435 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14438 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14439 tmp->inst_c1 = var->dreg;
14440 mono_bblock_insert_after_ins (bb, ins, tmp);
14444 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above: now that the sregs are processed it is safe
 * to record ins->dreg as the cached lvreg for the spilled variable. */
14446 if (dest_has_lvreg) {
14447 g_assert (ins->dreg != -1);
14448 vreg_to_lvreg [prev_dreg] = ins->dreg;
14449 g_assert (lvregs_len < 1024);
14450 lvregs [lvregs_len ++] = prev_dreg;
14451 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes at the top of the loop. */
14455 tmp_reg = ins->dreg;
14456 ins->dreg = ins->sreg2;
14457 ins->sreg2 = tmp_reg;
14460 if (MONO_IS_CALL (ins)) {
14461 /* Clear vreg_to_lvreg array */
14462 for (i = 0; i < lvregs_len; i++)
14463 vreg_to_lvreg [lvregs [i]] = 0;
14465 } else if (ins->opcode == OP_NOP) {
14467 MONO_INST_NULLIFY_SREGS (ins);
14470 if (cfg->verbose_level > 2)
14471 mono_print_ins_index (1, ins);
14474 /* Extend the live range based on the liveness info */
14475 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14476 for (i = 0; i < cfg->num_varinfo; i ++) {
14477 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14479 if (vreg_is_volatile (cfg, vi->vreg))
14480 /* The liveness info is incomplete */
14483 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14484 /* Live from at least the first ins of this bb */
14485 live_range_start [vi->vreg] = bb->code;
14486 live_range_start_bb [vi->vreg] = bb;
14489 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14490 /* Live at least until the last ins of this bb */
14491 live_range_end [vi->vreg] = bb->last_ins;
14492 live_range_end_bb [vi->vreg] = bb;
14499 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14500 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14502 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14503 for (i = 0; i < cfg->num_varinfo; ++i) {
14504 int vreg = MONO_VARINFO (cfg, i)->vreg;
14507 if (live_range_start [vreg]) {
14508 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14510 ins->inst_c1 = vreg;
14511 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14513 if (live_range_end [vreg]) {
14514 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14516 ins->inst_c1 = vreg;
14517 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14518 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14520 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14525 if (cfg->gsharedvt_locals_var_ins) {
14526 /* Nullify if unused */
14527 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14528 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* These four were allocated with g_new0/g_new above; everything else in this pass
 * came from the cfg mempool and is freed with the compilation. */
14531 g_free (live_range_start);
14532 g_free (live_range_end);
14533 g_free (live_range_start_bb);
14534 g_free (live_range_end_bb);
14539 * - use 'iadd' instead of 'int_add'
14540 * - handling ovf opcodes: decompose in method_to_ir.
14541 * - unify iregs/fregs
14542 * -> partly done, the missing parts are:
14543 * - a more complete unification would involve unifying the hregs as well, so
14544 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14545 * would no longer map to the machine hregs, so the code generators would need to
14546 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14547 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14548 * fp/non-fp branches speeds it up by about 15%.
14549 * - use sext/zext opcodes instead of shifts
14551 * - get rid of TEMPLOADs if possible and use vregs instead
14552 * - clean up usage of OP_P/OP_ opcodes
14553 * - cleanup usage of DUMMY_USE
14554 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14556 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14557 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14558 * - make sure handle_stack_args () is called before the branch is emitted
14559 * - when the new IR is done, get rid of all unused stuff
14560 * - COMPARE/BEQ as separate instructions or unify them ?
14561 * - keeping them separate allows specialized compare instructions like
14562 * compare_imm, compare_membase
14563 * - most back ends unify fp compare+branch, fp compare+ceq
14564 * - integrate mono_save_args into inline_method
14565 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14566 * - handle long shift opts on 32 bit platforms somehow: they require
14567 * 3 sregs (2 for arg1 and 1 for arg2)
14568 * - make byref a 'normal' type.
14569 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14570 * variable if needed.
14571 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14572 * like inline_method.
14573 * - remove inlining restrictions
14574 * - fix LNEG and enable cfold of INEG
14575 * - generalize x86 optimizations like ldelema as a peephole optimization
14576 * - add store_mem_imm for amd64
14577 * - optimize the loading of the interruption flag in the managed->native wrappers
14578 * - avoid special handling of OP_NOP in passes
14579 * - move code inserting instructions into one function/macro.
14580 * - try a coalescing phase after liveness analysis
14581 * - add float -> vreg conversion + local optimizations on !x86
14582 * - figure out how to handle decomposed branches during optimizations, ie.
14583 * compare+branch, op_jump_table+op_br etc.
14584 * - promote RuntimeXHandles to vregs
14585 * - vtype cleanups:
14586 * - add a NEW_VARLOADA_VREG macro
14587 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14588 * accessing vtype fields.
14589 * - get rid of I8CONST on 64 bit platforms
14590 * - dealing with the increase in code size due to branches created during opcode
14592 * - use extended basic blocks
14593 * - all parts of the JIT
14594 * - handle_global_vregs () && local regalloc
14595 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14596 * - sources of increase in code size:
14599 * - isinst and castclass
14600 * - lvregs not allocated to global registers even if used multiple times
14601 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14603 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14604 * - add all micro optimizations from the old JIT
14605 * - put tree optimizations into the deadce pass
14606 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14607 * specific function.
14608 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14609 * fcompare + branchCC.
14610 * - create a helper function for allocating a stack slot, taking into account
14611 * MONO_CFG_HAS_SPILLUP.
14613 * - merge the ia64 switch changes.
14614 * - optimize mono_regstate2_alloc_int/float.
14615 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14616 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14617 * parts of the tree could be separated by other instructions, killing the tree
14618 * arguments, or stores killing loads etc. Also, should we fold loads into other
14619 * instructions if the result of the load is used multiple times ?
14620 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14621 * - LAST MERGE: 108395.
14622 * - when returning vtypes in registers, generate IR and append it to the end of the
14623 * last bb instead of doing it in the epilog.
14624 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14632 - When to decompose opcodes:
14633 - earlier: this makes some optimizations hard to implement, since the low level IR
14634 no longer contains the necessary information. But it is easier to do.
14635 - later: harder to implement, enables more optimizations.
14636 - Branches inside bblocks:
14637 - created when decomposing complex opcodes.
14638 - branches to another bblock: harmless, but not tracked by the branch
14639 optimizations, so need to branch to a label at the start of the bblock.
14640 - branches to inside the same bblock: very problematic, trips up the local
14641 reg allocator. Can be fixed by splitting the current bblock, but that is a
14642 complex operation, since some local vregs can become global vregs etc.
14643 - Local/global vregs:
14644 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14645 local register allocator.
14646 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14647 structure, created by mono_create_var (). Assigned to hregs or the stack by
14648 the global register allocator.
14649 - When to do optimizations like alu->alu_imm:
14650 - earlier -> saves work later on since the IR will be smaller/simpler
14651 - later -> can work on more instructions
14652 - Handling of valuetypes:
14653 - When a vtype is pushed on the stack, a new temporary is created, an
14654 instruction computing its address (LDADDR) is emitted and pushed on
14655 the stack. Need to optimize cases when the vtype is used immediately as in
14656 argument passing, stloc etc.
14657 - Instead of the to_end stuff in the old JIT, simply call the function handling
14658 the values on the stack before emitting the last instruction of the bb.
14661 #endif /* DISABLE_JIT */