2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Relative cost assigned to a branch by the inlining heuristics — NOTE(review): confirm weighting against the callers elsewhere in this file */
73 #define BRANCH_COST 10
/* Upper bound on the size of a method eligible for inlining — presumably IL bytes; verify against the inliner */
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 * Instruction metadata
161 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
162 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
168 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
173 /* keep in sync with the enum in mini.h */
176 #include "mini-ops.h"
181 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
182 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
184 * This should contain the index of the last sreg + 1. This is not the same
185 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
187 const gint8 ins_sreg_counts[] = {
188 #include "mini-ops.h"
193 #define MONO_INIT_VARINFO(vi,id) do { \
194 (vi)->range.first_use.pos.bid = 0xffff; \
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_lreg (MonoCompile *cfg)
208 return alloc_lreg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_get_underlying_type (type);
275 switch (type->type) {
288 case MONO_TYPE_FNPTR:
290 case MONO_TYPE_CLASS:
291 case MONO_TYPE_STRING:
292 case MONO_TYPE_OBJECT:
293 case MONO_TYPE_SZARRAY:
294 case MONO_TYPE_ARRAY:
298 #if SIZEOF_REGISTER == 8
304 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
307 case MONO_TYPE_VALUETYPE:
308 if (type->data.klass->enumtype) {
309 type = mono_class_enum_basetype (type->data.klass);
312 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
315 case MONO_TYPE_TYPEDBYREF:
317 case MONO_TYPE_GENERICINST:
318 type = &type->data.generic_class->container_class->byval_arg;
322 g_assert (cfg->gshared);
323 if (mini_type_var_is_vt (type))
326 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
328 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
334 mono_print_bb (MonoBasicBlock *bb, const char *msg)
339 printf ("\n%s %d: [IN: ", msg, bb->block_num);
340 for (i = 0; i < bb->in_count; ++i)
341 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
343 for (i = 0; i < bb->out_count; ++i)
344 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
346 for (tree = bb->code; tree; tree = tree->next)
347 mono_print_ins_index (-1, tree);
351 mono_create_helper_signatures (void)
353 helper_sig_domain_get = mono_create_icall_signature ("ptr");
354 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
/*
 * break_on_unverified:
 * Debug aid: checks the 'break_on_unverified' debug option so a debugger trap
 * can fire when unverifiable IL is encountered. NOTE(review): the statement
 * executed when the option is set is elided in this excerpt — confirm upstream.
 */
357 static MONO_NEVER_INLINE void
358 break_on_unverified (void)
360 if (mini_get_debug_options ()->break_on_unverified)
364 static MONO_NEVER_INLINE void
365 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
367 char *method_fname = mono_method_full_name (method, TRUE);
368 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
370 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
371 g_free (method_fname);
372 g_free (cil_method_fname);
375 static MONO_NEVER_INLINE void
376 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *field_fname = mono_field_full_name (field);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
381 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
382 g_free (method_fname);
383 g_free (field_fname);
386 static MONO_NEVER_INLINE void
387 inline_failure (MonoCompile *cfg, const char *msg)
389 if (cfg->verbose_level >= 2)
390 printf ("inline failed: %s\n", msg);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
402 static MONO_NEVER_INLINE void
403 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
406 if (cfg->verbose_level >= 2)
407 printf ("%s\n", cfg->exception_message);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
412 * When using gsharedvt, some instantiations might be verifiable, and some might not be, i.e.
413 * foo<T> (int i) { ldarg.0; box T; }
415 #define UNVERIFIED do { \
416 if (cfg->gsharedvt) { \
417 if (cfg->verbose_level > 2) \
418 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
420 goto exception_exit; \
422 break_on_unverified (); \
426 #define GET_BBLOCK(cfg,tblock,ip) do { \
427 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
430 NEW_BBLOCK (cfg, (tblock)); \
431 (tblock)->cil_code = (ip); \
432 ADD_BBLOCK (cfg, (tblock)); \
436 #if defined(TARGET_X86) || defined(TARGET_AMD64)
437 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
438 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
439 (dest)->dreg = alloc_ireg_mp ((cfg)); \
440 (dest)->sreg1 = (sr1); \
441 (dest)->sreg2 = (sr2); \
442 (dest)->inst_imm = (imm); \
443 (dest)->backend.shift_amount = (shift); \
444 MONO_ADD_INS ((cfg)->cbb, (dest)); \
448 /* Emit conversions so both operands of a binary opcode are of the same type */
450 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
452 MonoInst *arg1 = *arg1_ref;
453 MonoInst *arg2 = *arg2_ref;
456 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
457 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
460 /* Mixing r4/r8 is allowed by the spec */
461 if (arg1->type == STACK_R4) {
462 int dreg = alloc_freg (cfg);
464 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
465 conv->type = STACK_R8;
469 if (arg2->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
473 conv->type = STACK_R8;
479 #if SIZEOF_REGISTER == 8
480 /* FIXME: Need to add many more cases */
481 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
484 int dr = alloc_preg (cfg);
485 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
486 (ins)->sreg2 = widen->dreg;
491 #define ADD_BINOP(op) do { \
492 MONO_INST_NEW (cfg, ins, (op)); \
494 ins->sreg1 = sp [0]->dreg; \
495 ins->sreg2 = sp [1]->dreg; \
496 type_from_op (cfg, ins, sp [0], sp [1]); \
498 /* Have to insert a widening op */ \
499 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
500 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
501 MONO_ADD_INS ((cfg)->cbb, (ins)); \
502 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
505 #define ADD_UNOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 type_from_op (cfg, ins, sp [0], NULL); \
511 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
512 MONO_ADD_INS ((cfg)->cbb, (ins)); \
513 *sp++ = mono_decompose_opcode (cfg, ins); \
516 #define ADD_BINCOND(next_block) do { \
519 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
520 cmp->sreg1 = sp [0]->dreg; \
521 cmp->sreg2 = sp [1]->dreg; \
522 type_from_op (cfg, cmp, sp [0], sp [1]); \
524 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
525 type_from_op (cfg, ins, sp [0], sp [1]); \
526 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
527 GET_BBLOCK (cfg, tblock, target); \
528 link_bblock (cfg, cfg->cbb, tblock); \
529 ins->inst_true_bb = tblock; \
530 if ((next_block)) { \
531 link_bblock (cfg, cfg->cbb, (next_block)); \
532 ins->inst_false_bb = (next_block); \
533 start_new_bblock = 1; \
535 GET_BBLOCK (cfg, tblock, ip); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_false_bb = tblock; \
538 start_new_bblock = 2; \
540 if (sp != stack_start) { \
541 handle_stack_args (cfg, stack_start, sp - stack_start); \
542 CHECK_UNVERIFIABLE (cfg); \
544 MONO_ADD_INS (cfg->cbb, cmp); \
545 MONO_ADD_INS (cfg->cbb, ins); \
549 * link_bblock: Links two basic blocks
551 * links two basic blocks in the control flow graph, the 'from'
552 * argument is the starting block and the 'to' argument is the block
553 * the block where control flow continues after 'from'.
556 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
558 MonoBasicBlock **newa;
562 if (from->cil_code) {
564 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
569 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 printf ("edge from entry to exit\n");
576 for (i = 0; i < from->out_count; ++i) {
577 if (to == from->out_bb [i]) {
583 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
584 for (i = 0; i < from->out_count; ++i) {
585 newa [i] = from->out_bb [i];
593 for (i = 0; i < to->in_count; ++i) {
594 if (from == to->in_bb [i]) {
600 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
601 for (i = 0; i < to->in_count; ++i) {
602 newa [i] = to->in_bb [i];
611 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
613 link_bblock (cfg, from, to);
617 * mono_find_block_region:
619 * We mark each basic block with a region ID. We use that to avoid BB
620 * optimizations when blocks are in different regions.
623 * A region token that encodes where this region is, and information
624 * about the clause owner for this block.
626 * The region encodes the try/catch/filter clause that owns this block
627 * as well as the type. -1 is a special value that represents a block
628 * that is in none of try/catch/filter.
631 mono_find_block_region (MonoCompile *cfg, int offset)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
637 for (i = 0; i < header->num_clauses; ++i) {
638 clause = &header->clauses [i];
639 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
640 (offset < (clause->handler_offset)))
641 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
643 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
644 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
645 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
646 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
647 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
652 for (i = 0; i < header->num_clauses; ++i) {
653 clause = &header->clauses [i];
655 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
656 return ((i + 1) << 8) | clause->flags;
663 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
665 MonoMethodHeader *header = cfg->header;
666 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
673 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
674 if (clause->flags == type)
675 res = g_list_append (res, clause);
682 mono_create_spvar_for_region (MonoCompile *cfg, int region)
686 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
690 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
698 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
700 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
704 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
708 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
712 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
713 /* prevent it from being register allocated */
714 var->flags |= MONO_INST_VOLATILE;
716 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
722 * Returns the type used in the eval stack when @type is loaded.
723 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
726 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
730 type = mini_get_underlying_type (type);
731 inst->klass = klass = mono_class_from_mono_type (type);
733 inst->type = STACK_MP;
738 switch (type->type) {
740 inst->type = STACK_INV;
748 inst->type = STACK_I4;
753 case MONO_TYPE_FNPTR:
754 inst->type = STACK_PTR;
756 case MONO_TYPE_CLASS:
757 case MONO_TYPE_STRING:
758 case MONO_TYPE_OBJECT:
759 case MONO_TYPE_SZARRAY:
760 case MONO_TYPE_ARRAY:
761 inst->type = STACK_OBJ;
765 inst->type = STACK_I8;
768 inst->type = cfg->r4_stack_type;
771 inst->type = STACK_R8;
773 case MONO_TYPE_VALUETYPE:
774 if (type->data.klass->enumtype) {
775 type = mono_class_enum_basetype (type->data.klass);
779 inst->type = STACK_VTYPE;
782 case MONO_TYPE_TYPEDBYREF:
783 inst->klass = mono_defaults.typed_reference_class;
784 inst->type = STACK_VTYPE;
786 case MONO_TYPE_GENERICINST:
787 type = &type->data.generic_class->container_class->byval_arg;
791 g_assert (cfg->gshared);
792 if (mini_is_gsharedvt_type (type)) {
793 g_assert (cfg->gsharedvt);
794 inst->type = STACK_VTYPE;
796 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
800 g_error ("unknown type 0x%02x in eval stack type", type->type);
805 * The following tables are used to quickly validate the IL code in type_from_op ().
808 bin_num_table [STACK_MAX] [STACK_MAX] = {
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
814 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
822 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
825 /* reduce the size of this table */
827 bin_int_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
839 bin_comp_table [STACK_MAX] [STACK_MAX] = {
840 /* Inv i L p F & O vt r4 */
842 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
843 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
844 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
845 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
846 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
847 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
848 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
849 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
852 /* reduce the size of this table */
854 shift_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
866 * Tables to map from the non-specific opcode to the matching
867 * type-specific opcode.
869 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
871 binops_op_map [STACK_MAX] = {
872 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
875 /* handles from CEE_NEG to CEE_CONV_U8 */
877 unops_op_map [STACK_MAX] = {
878 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
881 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
883 ovfops_op_map [STACK_MAX] = {
884 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
887 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
889 ovf2ops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
893 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
895 ovf3ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
899 /* handles from CEE_BEQ to CEE_BLT_UN */
901 beqops_op_map [STACK_MAX] = {
902 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
905 /* handles from CEE_CEQ to CEE_CLT_UN */
907 ceqops_op_map [STACK_MAX] = {
908 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
912 * Sets ins->type (the type on the eval stack) according to the
913 * type of the opcode and the arguments to it.
914 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
916 * FIXME: this function sets ins->type unconditionally in some cases, but
917 * it should set it to invalid for some types (a conv.x on an object)
920 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
922 switch (ins->opcode) {
929 /* FIXME: check unverifiable args for STACK_MP */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += binops_op_map [ins->type];
938 ins->type = bin_int_table [src1->type] [src2->type];
939 ins->opcode += binops_op_map [ins->type];
944 ins->type = shift_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
950 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
951 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
952 ins->opcode = OP_LCOMPARE;
953 else if (src1->type == STACK_R4)
954 ins->opcode = OP_RCOMPARE;
955 else if (src1->type == STACK_R8)
956 ins->opcode = OP_FCOMPARE;
958 ins->opcode = OP_ICOMPARE;
960 case OP_ICOMPARE_IMM:
961 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
962 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
963 ins->opcode = OP_LCOMPARE_IMM;
975 ins->opcode += beqops_op_map [src1->type];
978 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
979 ins->opcode += ceqops_op_map [src1->type];
985 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
990 ins->type = neg_table [src1->type];
991 ins->opcode += unops_op_map [ins->type];
994 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
995 ins->type = src1->type;
997 ins->type = STACK_INV;
998 ins->opcode += unops_op_map [ins->type];
1004 ins->type = STACK_I4;
1005 ins->opcode += unops_op_map [src1->type];
1008 ins->type = STACK_R8;
1009 switch (src1->type) {
1012 ins->opcode = OP_ICONV_TO_R_UN;
1015 ins->opcode = OP_LCONV_TO_R_UN;
1019 case CEE_CONV_OVF_I1:
1020 case CEE_CONV_OVF_U1:
1021 case CEE_CONV_OVF_I2:
1022 case CEE_CONV_OVF_U2:
1023 case CEE_CONV_OVF_I4:
1024 case CEE_CONV_OVF_U4:
1025 ins->type = STACK_I4;
1026 ins->opcode += ovf3ops_op_map [src1->type];
1028 case CEE_CONV_OVF_I_UN:
1029 case CEE_CONV_OVF_U_UN:
1030 ins->type = STACK_PTR;
1031 ins->opcode += ovf2ops_op_map [src1->type];
1033 case CEE_CONV_OVF_I1_UN:
1034 case CEE_CONV_OVF_I2_UN:
1035 case CEE_CONV_OVF_I4_UN:
1036 case CEE_CONV_OVF_U1_UN:
1037 case CEE_CONV_OVF_U2_UN:
1038 case CEE_CONV_OVF_U4_UN:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1043 ins->type = STACK_PTR;
1044 switch (src1->type) {
1046 ins->opcode = OP_ICONV_TO_U;
1050 #if SIZEOF_VOID_P == 8
1051 ins->opcode = OP_LCONV_TO_U;
1053 ins->opcode = OP_MOVE;
1057 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_FCONV_TO_U;
1066 ins->type = STACK_I8;
1067 ins->opcode += unops_op_map [src1->type];
1069 case CEE_CONV_OVF_I8:
1070 case CEE_CONV_OVF_U8:
1071 ins->type = STACK_I8;
1072 ins->opcode += ovf3ops_op_map [src1->type];
1074 case CEE_CONV_OVF_U8_UN:
1075 case CEE_CONV_OVF_I8_UN:
1076 ins->type = STACK_I8;
1077 ins->opcode += ovf2ops_op_map [src1->type];
1080 ins->type = cfg->r4_stack_type;
1081 ins->opcode += unops_op_map [src1->type];
1084 ins->type = STACK_R8;
1085 ins->opcode += unops_op_map [src1->type];
1088 ins->type = STACK_R8;
1092 ins->type = STACK_I4;
1093 ins->opcode += ovfops_op_map [src1->type];
1096 case CEE_CONV_OVF_I:
1097 case CEE_CONV_OVF_U:
1098 ins->type = STACK_PTR;
1099 ins->opcode += ovfops_op_map [src1->type];
1102 case CEE_ADD_OVF_UN:
1104 case CEE_MUL_OVF_UN:
1106 case CEE_SUB_OVF_UN:
1107 ins->type = bin_num_table [src1->type] [src2->type];
1108 ins->opcode += ovfops_op_map [src1->type];
1109 if (ins->type == STACK_R8)
1110 ins->type = STACK_INV;
1112 case OP_LOAD_MEMBASE:
1113 ins->type = STACK_PTR;
1115 case OP_LOADI1_MEMBASE:
1116 case OP_LOADU1_MEMBASE:
1117 case OP_LOADI2_MEMBASE:
1118 case OP_LOADU2_MEMBASE:
1119 case OP_LOADI4_MEMBASE:
1120 case OP_LOADU4_MEMBASE:
1121 ins->type = STACK_PTR;
1123 case OP_LOADI8_MEMBASE:
1124 ins->type = STACK_I8;
1126 case OP_LOADR4_MEMBASE:
1127 ins->type = cfg->r4_stack_type;
1129 case OP_LOADR8_MEMBASE:
1130 ins->type = STACK_R8;
1133 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1137 if (ins->type == STACK_MP)
1138 ins->klass = mono_defaults.object_class;
1143 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1149 param_table [STACK_MAX] [STACK_MAX] = {
1154 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1159 switch (args->type) {
1169 for (i = 0; i < sig->param_count; ++i) {
1170 switch (args [i].type) {
1174 if (!sig->params [i]->byref)
1178 if (sig->params [i]->byref)
1180 switch (sig->params [i]->type) {
1181 case MONO_TYPE_CLASS:
1182 case MONO_TYPE_STRING:
1183 case MONO_TYPE_OBJECT:
1184 case MONO_TYPE_SZARRAY:
1185 case MONO_TYPE_ARRAY:
1192 if (sig->params [i]->byref)
1194 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1203 /*if (!param_table [args [i].type] [sig->params [i]->type])
1211 * When we need a pointer to the current domain many times in a method, we
1212 * call mono_domain_get() once and we store the result in a local variable.
1213 * This function returns the variable that represents the MonoDomain*.
1215 inline static MonoInst *
1216 mono_get_domainvar (MonoCompile *cfg)
1218 if (!cfg->domainvar)
1219 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 return cfg->domainvar;
1224 * The got_var contains the address of the Global Offset Table when AOT
1228 mono_get_got_var (MonoCompile *cfg)
1230 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1232 if (!cfg->got_var) {
1233 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1235 return cfg->got_var;
1239 mono_get_vtable_var (MonoCompile *cfg)
1241 g_assert (cfg->gshared);
1243 if (!cfg->rgctx_var) {
1244 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1245 /* force the var to be stack allocated */
1246 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1249 return cfg->rgctx_var;
1253 type_from_stack_type (MonoInst *ins) {
1254 switch (ins->type) {
1255 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1256 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1257 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1258 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1259 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1261 return &ins->klass->this_arg;
1262 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1263 case STACK_VTYPE: return &ins->klass->byval_arg;
1265 g_error ("stack type %d to monotype not handled\n", ins->type);
1270 static G_GNUC_UNUSED int
1271 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1273 t = mono_type_get_underlying_type (t);
1285 case MONO_TYPE_FNPTR:
1287 case MONO_TYPE_CLASS:
1288 case MONO_TYPE_STRING:
1289 case MONO_TYPE_OBJECT:
1290 case MONO_TYPE_SZARRAY:
1291 case MONO_TYPE_ARRAY:
1297 return cfg->r4_stack_type;
1300 case MONO_TYPE_VALUETYPE:
1301 case MONO_TYPE_TYPEDBYREF:
1303 case MONO_TYPE_GENERICINST:
1304 if (mono_type_generic_inst_is_valuetype (t))
1310 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it accesses.
 * NOTE(review): the case labels preceding most returns are elided in this
 * excerpt; only the CEE_*_REF labels are visible. */
1317 array_access_to_klass (int opcode)
1321 return mono_defaults.byte_class;
1323 return mono_defaults.uint16_class;
1326 return mono_defaults.int_class;
1329 return mono_defaults.sbyte_class;
1332 return mono_defaults.int16_class;
1335 return mono_defaults.int32_class;
1337 return mono_defaults.uint32_class;
1340 return mono_defaults.int64_class;
1343 return mono_defaults.single_class;
1346 return mono_defaults.double_class;
1347 case CEE_LDELEM_REF:
1348 case CEE_STELEM_REF:
1349 return mono_defaults.object_class;
1351 g_assert_not_reached ();
1357 * We try to share variables when possible
/* Return a local used to carry stack slot 'slot' across basic-block
 * boundaries, reusing a cached one (cfg->intvars) keyed on (slot, stack type)
 * when possible so join points share the same variable. */
1360 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1365 /* inlining can result in deeper stacks */
1366 if (slot >= cfg->header->max_stack)
1367 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: stack types start at 1, hence the -1. */
1369 pos = ins->type - 1 + slot * STACK_MAX;
1371 switch (ins->type) {
/* NOTE(review): the case labels selecting the cacheable stack types are
 * elided in this excerpt. */
1378 if ((vnum = cfg->intvars [pos]))
1379 return cfg->varinfo [vnum];
1380 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 cfg->intvars [pos] = res->inst_c0;
/* Fallback (non-cacheable stack type): always create a fresh variable. */
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record image+token for 'key' in cfg->token_info_hash so AOT can resolve it
 * later without a generic context. */
1390 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1393 * Don't use this if a generic_context is set, since that means AOT can't
1394 * look up the method using just the image+token.
1395 * table == 0 means this is a reference made from a wrapper.
1397 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1398 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1399 jump_info_token->image = image;
1400 jump_info_token->token = token;
1401 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1406 * This function is called to handle items that are left on the evaluation stack
1407 * at basic block boundaries. What happens is that we save the values to local variables
1408 * and we reload them later when first entering the target basic block (with the
1409 * handle_loaded_temps () function).
1410 * A single joint point will use the same variables (stored in the array bb->out_stack or
1411 * bb->in_stack, if the basic block is before or after the joint point).
1413 * This function needs to be called _before_ emitting the last instruction of
1414 * the bb (i.e. before emitting a branch).
1415 * If the stack merge fails at a join point, cfg->unverifiable is set.
1418 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1421 MonoBasicBlock *bb = cfg->cbb;
1422 MonoBasicBlock *outb;
1423 MonoInst *inst, **locals;
1428 if (cfg->verbose_level > 3)
1429 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or create) the out_stack variables. */
1430 if (!bb->out_scount) {
1431 bb->out_scount = count;
1432 //printf ("bblock %d has out:", bb->block_num);
/* If some successor already has an in_stack, adopt it so both sides agree. */
1434 for (i = 0; i < bb->out_count; ++i) {
1435 outb = bb->out_bb [i];
1436 /* exception handlers are linked, but they should not be considered for stack args */
1437 if (outb->flags & BB_EXCEPTION_HANDLER)
1439 //printf (" %d", outb->block_num);
1440 if (outb->in_stack) {
1442 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1448 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1449 for (i = 0; i < count; ++i) {
1451 * try to reuse temps already allocated for this purpouse, if they occupy the same
1452 * stack slot and if they are of the same type.
1453 * This won't cause conflicts since if 'local' is used to
1454 * store one of the values in the in_stack of a bblock, then
1455 * the same variable will be used for the same outgoing stack
1457 * This doesn't work when inlining methods, since the bblocks
1458 * in the inlined methods do not inherit their in_stack from
1459 * the bblock they are inlined to. See bug #58863 for an
1462 if (cfg->inlined_method)
1463 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1465 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor's in_stack; mismatched
 * depths at a join point make the method unverifiable. */
1470 for (i = 0; i < bb->out_count; ++i) {
1471 outb = bb->out_bb [i];
1472 /* exception handlers are linked, but they should not be considered for stack args */
1473 if (outb->flags & BB_EXCEPTION_HANDLER)
1475 if (outb->in_scount) {
1476 if (outb->in_scount != bb->out_scount) {
1477 cfg->unverifiable = TRUE;
1480 continue; /* check they are the same locals */
1482 outb->in_scount = count;
1483 outb->in_stack = bb->out_stack;
1486 locals = bb->out_stack;
/* Spill the current stack values into the shared locals. */
1488 for (i = 0; i < count; ++i) {
1489 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1490 inst->cil_code = sp [i]->cil_code;
1491 sp [i] = locals [i];
1492 if (cfg->verbose_level > 3)
1493 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1497 * It is possible that the out bblocks already have in_stack assigned, and
1498 * the in_stacks differ. In this case, we will store to all the different
1505 /* Find a bblock which has a different in_stack */
1507 while (bindex < bb->out_count) {
1508 outb = bb->out_bb [bindex];
1509 /* exception handlers are linked, but they should not be considered for stack args */
1510 if (outb->flags & BB_EXCEPTION_HANDLER) {
1514 if (outb->in_stack != locals) {
1515 for (i = 0; i < count; ++i) {
1516 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1517 inst->cil_code = sp [i]->cil_code;
1518 sp [i] = locals [i];
1519 if (cfg->verbose_level > 3)
1520 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1522 locals = outb->in_stack;
/* Emit IR that sets 'intf_bit_reg' nonzero iff the interface bitmap found at
 * base_reg+offset has the bit for klass's interface_id set. Three strategies:
 * compressed bitmap (icall), AOT (iid known only at runtime, so compute the
 * byte/bit indices in IR), or JIT (iid is a compile-time constant). */
1532 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1534 int ibitmap_reg = alloc_preg (cfg);
1535 #ifdef COMPRESSED_INTERFACE_BITMAP
1537 MonoInst *res, *ins;
1538 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1539 MONO_ADD_INS (cfg->cbb, ins);
/* Under AOT the interface id is a patch-time constant. */
1541 if (cfg->compile_aot)
1542 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1544 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1545 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1548 int ibitmap_byte_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1552 if (cfg->compile_aot) {
1553 int iid_reg = alloc_preg (cfg);
1554 int shifted_iid_reg = alloc_preg (cfg);
1555 int ibitmap_byte_address_reg = alloc_preg (cfg);
1556 int masked_iid_reg = alloc_preg (cfg);
1557 int iid_one_bit_reg = alloc_preg (cfg);
1558 int iid_bit_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7), computed in IR. */
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1564 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask fold to immediates. */
1568 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1575 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1576 * stored in "klass_reg" implements the interface "klass".
1579 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Bitmap lives at MonoClass.interface_bitmap. */
1581 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1585 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1586 * stored in "vtable_reg" implements the interface "klass".
1589 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Same check as the class variant, but the bitmap is read off the vtable. */
1591 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1595 * Emit code which checks whenever the interface id of @klass is smaller than
1596 * than the value given by max_iid_reg.
1599 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1600 MonoBasicBlock *false_target)
1602 if (cfg->compile_aot) {
1603 int iid_reg = alloc_preg (cfg);
1604 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target, branch there on failure; otherwise throw.
 * NOTE(review): the if/else selecting between these two lines is elided
 * in this excerpt. */
1610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1615 /* Same as above, but obtains max_iid from a vtable */
1617 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1618 MonoBasicBlock *false_target)
1620 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id then delegate to mini_emit_max_iid_check. */
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1623 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1626 /* Same as above, but obtains max_iid from a klass */
1628 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1629 MonoBasicBlock *false_target)
1631 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id then delegate to mini_emit_max_iid_check. */
1633 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1634 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style subtype test: walk the supertypes table of the class in
 * 'klass_reg' at klass's idepth and compare against klass (or klass_ins /
 * AOT class constant). Branches to true_target on match; the idepth guard
 * branches to false_target when the hierarchy is too shallow. */
1638 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1640 int idepth_reg = alloc_preg (cfg);
1641 int stypes_reg = alloc_preg (cfg);
1642 int stype = alloc_preg (cfg);
1644 mono_class_setup_supertypes (klass);
/* Supertypes tables are only guaranteed MONO_DEFAULT_SUPERTABLE_SIZE deep;
 * deeper targets need an explicit runtime idepth check. */
1646 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime-provided class inst, an AOT constant, or the
 * immediate pointer, depending on compilation mode.
 * NOTE(review): the 'if (klass_ins)' guard for the first compare is elided
 * in this excerpt. */
1654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1655 } else if (cfg->compile_aot) {
1656 int const_reg = alloc_preg (cfg);
1657 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no runtime class instruction. */
1666 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast test against the vtable in 'vtable_reg': range-check
 * the interface id, test the interface bitmap bit, then either branch to
 * true_target or throw InvalidCastException (the selecting if/else between
 * the last two lines is elided in this excerpt). */
1672 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int intf_reg = alloc_preg (cfg);
1676 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1677 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1682 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1686 * Variant of the above that takes a register to the class, not the vtable.
1689 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1691 int intf_bit_reg = alloc_preg (cfg);
1693 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1694 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Branch on success or throw; the selecting if/else is elided in this excerpt. */
1697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1699 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check on 'klass_reg'; throws
 * InvalidCastException on mismatch. Comparison source is a runtime
 * instruction (klass_inst), an AOT class constant, or an immediate pointer.
 * NOTE(review): the 'if (klass_inst)' guard before the first compare is
 * elided in this excerpt. */
1703 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1706 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1707 } else if (cfg->compile_aot) {
1708 int const_reg = alloc_preg (cfg);
1709 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1718 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1720 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare the class in 'klass_reg' against 'klass' and emit 'branch_op' to
 * 'target' (no exception variant). */
1724 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1726 if (cfg->compile_aot) {
1727 int const_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below is mutually recursive
 * with this for arrays of arrays. */
1737 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a full castclass check against the class in 'klass_reg', throwing
 * InvalidCastException on failure. Arrays are handled by checking rank and
 * then recursing on the element (cast_class); non-arrays walk the supertypes
 * table. NOTE(review): the 'if (klass->rank)' guard opening the array branch
 * is elided in this excerpt. */
1740 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1743 int rank_reg = alloc_preg (cfg);
1744 int eclass_reg = alloc_preg (cfg);
1746 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1750 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving object/Enum/ValueType so enum arrays
 * and their underlying-type arrays cast per ECMA array covariance rules. */
1752 if (klass->cast_class == mono_defaults.object_class) {
1753 int parent_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1755 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1756 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1757 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1758 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1759 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1760 } else if (klass->cast_class == mono_defaults.enum_class) {
1761 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1762 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1763 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1765 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1766 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector
 * (bounds == NULL), not a multi-dimensional rank-1 array. */
1769 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1770 /* Check that the object is a vector too */
1771 int bounds_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, throwing on depth or type mismatch. */
1777 int idepth_reg = alloc_preg (cfg);
1778 int stypes_reg = alloc_preg (cfg);
1779 int stype = alloc_preg (cfg);
1781 mono_class_setup_supertypes (klass);
1783 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1786 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1790 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1795 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1797 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit IR that sets 'size' bytes at destreg+offset to 'val' (only val == 0 is
 * supported, asserted below). Small aligned sizes use a single store-immediate;
 * larger sizes emit a sequence of widest-possible stores from a register.
 * NOTE(review): the size-dispatch switch labels and the loops that advance
 * offset/size between stores are elided in this excerpt. */
1801 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1805 g_assert (val == 0);
/* Fast path: the whole region fits one naturally-aligned store. */
1810 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1816 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1821 #if SIZEOF_REGISTER == 8
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize val in a register and store it repeatedly. */
1829 val_reg = alloc_preg (cfg);
1831 if (SIZEOF_REGISTER == 8)
1832 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1834 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until aligned. */
1837 /* This could be optimized further if neccesary */
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* On 64-bit targets without an unaligned-access penalty path, prefer wider
 * stores; then drain the remainder with 8/4/2/1-byte stores. */
1846 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit an unrolled inline copy of 'size' bytes from srcreg+soffset to
 * destreg+doffset, using the widest loads/stores the alignment allows.
 * NOTE(review): the loop headers that advance offsets and decrement size
 * between load/store pairs are elided in this excerpt. */
1877 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1884 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1885 g_assert (size < 10000);
/* Unaligned prefix: byte-by-byte until aligned. */
1888 /* This could be optimized further if neccesary */
1890 cur_reg = alloc_preg (cfg);
1891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks on 64-bit targets that tolerate unaligned access. */
1899 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1901 cur_reg = alloc_preg (cfg);
1902 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Drain remainder with 4-, 2-, then 1-byte copies. */
1911 cur_reg = alloc_preg (cfg);
1912 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1937 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1941 if (cfg->compile_aot) {
1942 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1943 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1945 ins->sreg2 = c->dreg;
1946 MONO_ADD_INS (cfg->cbb, ins);
1948 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1950 ins->inst_offset = mini_get_tls_offset (tls_key);
1951 MONO_ADD_INS (cfg->cbb, ins);
1958 * Emit IR to push the current LMF onto the LMF stack.
1961 emit_push_lmf (MonoCompile *cfg)
1964 * Emit IR to push the LMF:
1965 * lmf_addr = <lmf_addr from tls>
1966 * lmf->lmf_addr = lmf_addr
1967 * lmf->prev_lmf = *lmf_addr
1970 int lmf_reg, prev_lmf_reg;
1971 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so save the old LMF into our
 * frame's lmf_var and install ours directly via a TLS store. */
1976 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1977 /* Load current lmf */
1978 lmf_ins = mono_get_lmf_intrinsic (cfg);
1980 MONO_ADD_INS (cfg->cbb, lmf_ins);
1981 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1982 lmf_reg = ins->dreg;
1983 /* Save previous_lmf */
1984 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1986 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
/* Slow path: obtain lmf_addr (the per-thread slot that points at the top
 * LMF) via whichever mechanism the target supports, then link our LMF in. */
1989 * Store lmf_addr in a variable, so it can be allocated to a global register.
1991 if (!cfg->lmf_addr_var)
1992 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Option A: compute &jit_tls->lmf from the jit_tls intrinsic.
 * NOTE(review): the #ifdef/capability checks selecting among the
 * alternatives below are elided in this excerpt. */
1995 ins = mono_get_jit_tls_intrinsic (cfg);
1997 int jit_tls_dreg = ins->dreg;
1999 MONO_ADD_INS (cfg->cbb, ins);
2000 lmf_reg = alloc_preg (cfg);
2001 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Option B: icall fallback. */
2003 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
/* Option C: dedicated lmf_addr intrinsic. */
2006 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2008 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 MonoInst *args [16], *jit_tls_ins, *ins;
2013 /* Inline mono_get_lmf_addr () */
2014 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2016 /* Load mono_jit_tls_id */
2017 if (cfg->compile_aot)
2018 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2020 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2021 /* call pthread_getspecific () */
2022 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2023 /* lmf_addr = &jit_tls->lmf */
2024 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2027 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2031 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link our LMF into the list: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2033 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2034 lmf_reg = ins->dreg;
2036 prev_lmf_reg = alloc_preg (cfg);
2037 /* Save previous_lmf */
2038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2039 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2041 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2048 * Emit IR to pop the current LMF from the LMF stack.
2051 emit_pop_lmf (MonoCompile *cfg)
2053 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2059 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2060 lmf_reg = ins->dreg;
/* Fast path (LMF in TLS): restore previous_lmf straight into the TLS slot. */
2062 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2063 /* Load previous_lmf */
2064 prev_lmf_reg = alloc_preg (cfg);
2065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2067 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2070 * Emit IR to pop the LMF:
2071 * *(lmf->lmf_addr) = lmf->prev_lmf
2073 /* This could be called before emit_push_lmf () */
2074 if (!cfg->lmf_addr_var)
2075 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2076 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2078 prev_lmf_reg = alloc_preg (cfg);
2079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave icall ('func') passing the current method,
 * but only for the outermost method and only when enter/leave profiling
 * is enabled. */
2085 emit_instrumentation_call (MonoCompile *cfg, void *func)
2087 MonoInst *iargs [1];
2090 * Avoid instrumenting inlined methods since it can
2091 * distort profiling results.
2093 if (cfg->method != cfg->current_method)
2096 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2097 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2098 mono_emit_jit_icall (cfg, func, iargs);
/* Pick the call opcode family (VOID/I/L/F/R/V CALL) for a return type, then
 * the REG / MEMBASE / plain variant based on calli / virt.
 * NOTE(review): several case labels between the visible returns are elided
 * in this excerpt. */
2103 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2106 type = mini_get_underlying_type (type);
2107 switch (type->type) {
2108 case MONO_TYPE_VOID:
2109 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2116 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2120 case MONO_TYPE_FNPTR:
2121 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2122 case MONO_TYPE_CLASS:
2123 case MONO_TYPE_STRING:
2124 case MONO_TYPE_OBJECT:
2125 case MONO_TYPE_SZARRAY:
2126 case MONO_TYPE_ARRAY:
2127 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2133 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2135 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2137 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2138 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying primitive type. */
2139 if (type->data.klass->enumtype) {
2140 type = mono_class_enum_basetype (type->data.klass);
2143 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2144 case MONO_TYPE_TYPEDBYREF:
2145 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2146 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2147 type = &type->data.generic_class->container_class->byval_arg;
2150 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are returned as vtypes. */
2152 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2160 * target_type_is_incompatible:
2161 * @cfg: MonoCompile context
2163 * Check that the item @arg on the evaluation stack can be stored
2164 * in the target type (can be a local, or field, etc).
2165 * The cfg arg can be used to check if we need verification or just
2168 * Returns: non-0 value if arg can't be stored on a target.
2171 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2173 MonoType *simple_type;
2176 if (target->byref) {
2177 /* FIXME: check that the pointed to types match */
2178 if (arg->type == STACK_MP)
2179 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2180 if (arg->type == STACK_PTR)
/* NOTE(review): the return values for these byref branches and the default
 * mismatch return are elided in this excerpt. */
2185 simple_type = mini_get_underlying_type (target);
2186 switch (simple_type->type) {
2187 case MONO_TYPE_VOID:
/* Integer-sized targets accept I4 and PTR (case labels elided here). */
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2228 if (arg->type != cfg->r4_stack_type)
2232 if (arg->type != STACK_R8)
2235 case MONO_TYPE_VALUETYPE:
2236 if (arg->type != STACK_VTYPE)
2238 klass = mono_class_from_mono_type (simple_type);
2239 if (klass != arg->klass)
2242 case MONO_TYPE_TYPEDBYREF:
2243 if (arg->type != STACK_VTYPE)
2245 klass = mono_class_from_mono_type (simple_type);
2246 if (klass != arg->klass)
2249 case MONO_TYPE_GENERICINST:
2250 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 /* The second cases is needed when doing partial sharing */
2255 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2259 if (arg->type != STACK_OBJ)
2261 /* FIXME: check type compatibility */
2265 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
2266 g_assert (cfg->gshared);
2267 if (mini_type_var_is_vt (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2271 if (arg->type != STACK_OBJ)
2276 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2282 * Prepare arguments for passing to a function call.
2283 * Return a non-zero value if the arguments can't be passed to the given
2285 * The type checks are not yet complete and some conversions may need
2286 * casts on 32 or 64 bit architectures.
2288 * FIXME: implement this using target_type_is_incompatible ()
2291 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2293 MonoType *simple_type;
/* 'this' (when present; the hasthis guard is elided in this excerpt) must be
 * an object reference or a pointer. */
2297 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2301 for (i = 0; i < sig->param_count; ++i) {
2302 if (sig->params [i]->byref) {
2303 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2307 simple_type = mini_get_underlying_type (sig->params [i]);
/* NOTE(review): several case labels and the mismatch returns between the
 * visible lines are elided in this excerpt. */
2309 switch (simple_type->type) {
2310 case MONO_TYPE_VOID:
2319 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2325 case MONO_TYPE_FNPTR:
2326 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2329 case MONO_TYPE_CLASS:
2330 case MONO_TYPE_STRING:
2331 case MONO_TYPE_OBJECT:
2332 case MONO_TYPE_SZARRAY:
2333 case MONO_TYPE_ARRAY:
2334 if (args [i]->type != STACK_OBJ)
2339 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != cfg->r4_stack_type)
2347 if (args [i]->type != STACK_R8)
2350 case MONO_TYPE_VALUETYPE:
/* Enums check against their underlying primitive type. */
2351 if (simple_type->data.klass->enumtype) {
2352 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_TYPEDBYREF:
2359 if (args [i]->type != STACK_VTYPE)
2362 case MONO_TYPE_GENERICINST:
2363 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2366 case MONO_TYPE_MVAR:
2368 if (args [i]->type != STACK_VTYPE)
2372 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALL_MEMBASE (virtual-call) opcode to its direct-call
 * counterpart. NOTE(review): the plain-call return values between the case
 * labels are elided in this excerpt. */
2380 callvirt_to_call (int opcode)
2383 case OP_CALL_MEMBASE:
2385 case OP_VOIDCALL_MEMBASE:
2387 case OP_FCALL_MEMBASE:
2389 case OP_RCALL_MEMBASE:
2391 case OP_VCALL_MEMBASE:
2393 case OP_LCALL_MEMBASE:
2396 g_assert_not_reached ();
/* Map an OP_*CALL_MEMBASE opcode to its indirect (REG) counterpart. */
2403 callvirt_to_call_reg (int opcode)
2406 case OP_CALL_MEMBASE:
2408 case OP_VOIDCALL_MEMBASE:
2409 return OP_VOIDCALL_REG;
2410 case OP_FCALL_MEMBASE:
2411 return OP_FCALL_REG;
2412 case OP_RCALL_MEMBASE:
2413 return OP_RCALL_REG;
2414 case OP_VCALL_MEMBASE:
2415 return OP_VCALL_REG;
2416 case OP_LCALL_MEMBASE:
2417 return OP_LCALL_REG;
2419 g_assert_not_reached ();
2425 /* Either METHOD or IMT_ARG needs to be set */
/* Materialize the IMT argument (either an existing imt_arg value or a method
 * constant) into a register and attach it to 'call' — via call->imt_arg_reg
 * for LLVM, or pinned to MONO_ARCH_IMT_REG otherwise. NOTE(review): the
 * #ifdef/if structure separating the two halves is elided in this excerpt. */
2427 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2431 if (COMPILE_LLVM (cfg)) {
2432 method_reg = alloc_preg (cfg);
2435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2436 } else if (cfg->compile_aot) {
2437 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2440 MONO_INST_NEW (cfg, ins, OP_PCONST);
2441 ins->inst_p0 = method;
2442 ins->dreg = method_reg;
2443 MONO_ADD_INS (cfg->cbb, ins);
2447 call->imt_arg_reg = method_reg;
2449 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant materialization, always via MONO_ARCH_IMT_REG. */
2453 method_reg = alloc_preg (cfg);
2456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2457 } else if (cfg->compile_aot) {
2458 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2461 MONO_INST_NEW (cfg, ins, OP_PCONST);
2462 ins->inst_p0 = method;
2463 ins->dreg = method_reg;
2464 MONO_ADD_INS (cfg->cbb, ins);
2467 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from 'mp' and fill in its target; the ip/type field
 * assignments are elided in this excerpt. */
2470 static MonoJumpInfo *
2471 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2473 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2477 ji->data.target = target;
/* cfg-aware wrapper around mono_class_check_context_used; the gshared guard
 * returning 0 is elided in this excerpt. */
2483 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2486 return mono_class_check_context_used (klass);
/* cfg-aware wrapper around mono_method_check_context_used; the gshared guard
 * returning 0 is elided in this excerpt. */
2492 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2495 return mono_method_check_context_used (method);
2501 * check_method_sharing:
2503 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2506 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2508 gboolean pass_vtable = FALSE;
2509 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable as an
 * implicit argument when the method can be compiled shared. */
2511 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2512 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2513 gboolean sharable = FALSE;
2515 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2519 * Pass vtable iff target method might
2520 * be shared, which means that sharing
2521 * is enabled for its class and its
2522 * context is sharable (and it's not a
2525 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Methods with a method_inst (generic methods) take an mrgctx instead —
 * the two are mutually exclusive (asserted below). */
2529 if (mini_method_get_context (cmethod) &&
2530 mini_method_get_context (cmethod)->method_inst) {
2531 g_assert (!pass_vtable);
2533 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2536 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2541 if (out_pass_vtable)
2542 *out_pass_vtable = pass_vtable;
2543 if (out_pass_mrgctx)
2544 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG with ARGS. CALLI/VIRTUAL/TAIL/RGCTX/
 * UNBOX_TRAMPOLINE select the call flavour. Handles vtype returns (via
 * cfg->vret_addr or an OP_OUTARG_VTRETADDR temp), soft-float r4 argument
 * conversion, and lets either LLVM or the arch back end emit the outargs.
 * The call is NOT added to a bblock here; callers do that.
 */
2547 inline static MonoCallInst *
2548 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2549 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2553 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2561 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2565 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2568 call->signature = sig;
2569 call->rgctx_reg = rgctx;
2570 sig_ret = mini_get_underlying_type (sig->ret);
2572 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return: reuse the caller-provided return buffer when available... */
2575 if (mini_type_is_vtype (sig_ret)) {
2576 call->vret_var = cfg->vret_addr;
2577 //g_assert_not_reached ();
/* ...otherwise allocate a temp and take its address lazily. */
2579 } else if (mini_type_is_vtype (sig_ret)) {
2580 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2583 temp->backend.is_pinvoke = sig->pinvoke;
2586 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2587 * address of return value to increase optimization opportunities.
2588 * Before vtype decomposition, the dreg of the call ins itself represents the
2589 * fact the call modifies the return value. After decomposition, the call will
2590 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2591 * will be transformed into an LDADDR.
2593 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2594 loada->dreg = alloc_preg (cfg);
2595 loada->inst_p0 = temp;
2596 /* We reference the call too since call->dreg could change during optimization */
2597 loada->inst_p1 = call;
2598 MONO_ADD_INS (cfg->cbb, loada);
2600 call->inst.dreg = temp->dreg;
2602 call->vret_var = loada;
2603 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2604 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2606 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2607 if (COMPILE_SOFT_FLOAT (cfg)) {
2609 * If the call has a float argument, we would need to do an r8->r4 conversion using
2610 * an icall, but that cannot be done during the call sequence since it would clobber
2611 * the call registers + the stack. So we do it before emitting the call.
2613 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2615 MonoInst *in = call->args [i];
2617 if (i >= sig->hasthis)
2618 t = sig->params [i - sig->hasthis];
2620 t = &mono_defaults.int_class->byval_arg;
2621 t = mono_type_get_underlying_type (t);
2623 if (!t->byref && t->type == MONO_TYPE_R4) {
2624 MonoInst *iargs [1];
2628 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2630 /* The result will be in an int vreg */
2631 call->args [i] = conv;
2637 call->need_unbox_trampoline = unbox_trampoline;
2640 if (COMPILE_LLVM (cfg))
2641 mono_llvm_emit_call (cfg, call);
2643 mono_arch_emit_call (cfg, call);
2645 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area needed by any call in the method. */
2648 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2649 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value in RGCTX_REG to CALL so it is passed in
 * MONO_ARCH_RGCTX_REG, and mark the cfg/call as using the rgctx reg.
 */
2655 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2657 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2658 cfg->uses_rgctx_reg = TRUE;
2659 call->rgctx_reg = TRUE;
2661 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG are optional extra hidden arguments. For pinvoke
 * wrappers with cfg->check_pinvoke_callconv set, the SP is saved before and
 * compared after the call; a mismatch (wrong native calling convention)
 * restores the stack and throws ExecutionEngineException.
 */
2665 inline static MonoInst*
2666 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2671 gboolean check_sp = FALSE;
2673 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2674 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2676 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx into a fresh reg so it survives until the call. */
2681 rgctx_reg = mono_alloc_preg (cfg);
2682 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2686 if (!cfg->stack_inbalance_var)
2687 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call for the post-call balance check. */
2689 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2690 ins->dreg = cfg->stack_inbalance_var->dreg;
2691 MONO_ADD_INS (cfg->cbb, ins);
2694 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2696 call->inst.sreg1 = addr->dreg;
2699 emit_imt_argument (cfg, call, NULL, imt_arg);
2701 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2706 sp_reg = mono_alloc_preg (cfg);
2708 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2710 MONO_ADD_INS (cfg->cbb, ins);
2712 /* Restore the stack so we don't crash when throwing the exception */
2713 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2714 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2717 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
2728 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2731 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2733 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2736 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2737 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2739 #ifndef DISABLE_REMOTING
2740 gboolean might_be_remote = FALSE;
2742 gboolean virtual = this_ins != NULL;
2743 gboolean enable_for_aot = TRUE;
2746 MonoInst *call_target = NULL;
2748 gboolean need_unbox_trampoline;
2751 sig = mono_method_signature (method);
2753 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2754 MonoInst *icall_args [16];
2757 // FIXME: Optimize this
2759 guint32 imt_slot = mono_method_get_imt_slot (method);
2761 icall_args [0] = this_ins;
2762 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2764 icall_args [2] = imt_arg;
2766 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2767 icall_args [2] = ins;
2769 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2771 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2775 rgctx_reg = mono_alloc_preg (cfg);
2776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2779 if (method->string_ctor) {
2780 /* Create the real signature */
2781 /* FIXME: Cache these */
2782 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2783 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2788 context_used = mini_method_check_context_used (cfg, method);
2790 #ifndef DISABLE_REMOTING
2791 might_be_remote = this_ins && sig->hasthis &&
2792 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2793 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2795 if (might_be_remote && context_used) {
2798 g_assert (cfg->gshared);
2800 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2802 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2806 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2807 // FIXME: Vcall optimizations below
2808 MonoInst *icall_args [16];
2811 if (sig->generic_param_count) {
2813 * Generic virtual call, pass the concrete method as the imt argument.
2815 imt_arg = emit_get_rgctx_method (cfg, context_used,
2816 method, MONO_RGCTX_INFO_METHOD);
2819 // FIXME: Optimize this
2821 int slot = mono_method_get_vtable_index (method);
2823 icall_args [0] = this_ins;
2824 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2826 icall_args [2] = imt_arg;
2828 EMIT_NEW_PCONST (cfg, ins, NULL);
2829 icall_args [2] = ins;
2831 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2834 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2836 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2838 #ifndef DISABLE_REMOTING
2839 if (might_be_remote)
2840 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2843 call->method = method;
2844 call->inst.flags |= MONO_INST_HAS_METHOD;
2845 call->inst.inst_left = this_ins;
2846 call->tail_call = tail;
2849 int vtable_reg, slot_reg, this_reg;
2852 this_reg = this_ins->dreg;
2854 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2855 MonoInst *dummy_use;
2857 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2859 /* Make a call to delegate->invoke_impl */
2860 call->inst.inst_basereg = this_reg;
2861 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2862 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2864 /* We must emit a dummy use here because the delegate trampoline will
2865 replace the 'this' argument with the delegate target making this activation
2866 no longer a root for the delegate.
2867 This is an issue for delegates that target collectible code such as dynamic
2868 methods of GC'able assemblies.
2870 For a test case look into #667921.
2872 FIXME: a dummy use is not the best way to do it as the local register allocator
2873 will put it on a caller save register and spil it around the call.
2874 Ideally, we would either put it on a callee save register or only do the store part.
2876 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2878 return (MonoInst*)call;
2881 if ((!cfg->compile_aot || enable_for_aot) &&
2882 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2883 (MONO_METHOD_IS_FINAL (method) &&
2884 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2885 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2887 * the method is not virtual, we just need to ensure this is not null
2888 * and then we can call the method directly.
2890 #ifndef DISABLE_REMOTING
2891 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2893 * The check above ensures method is not gshared, this is needed since
2894 * gshared methods can't have wrappers.
2896 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2900 if (!method->string_ctor)
2901 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2903 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2904 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2906 * the method is virtual, but we can statically dispatch since either
2907 * it's class or the method itself are sealed.
2908 * But first we need to ensure it's not a null reference.
2910 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2912 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2913 } else if (call_target) {
2914 vtable_reg = alloc_preg (cfg);
2915 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2917 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2918 call->inst.sreg1 = call_target->dreg;
2919 call->inst.flags &= !MONO_INST_HAS_METHOD;
2921 vtable_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2923 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2924 guint32 imt_slot = mono_method_get_imt_slot (method);
2925 emit_imt_argument (cfg, call, call->method, imt_arg);
2926 slot_reg = vtable_reg;
2927 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->is_virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no imt/rgctx hidden arguments.
 */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native address FUNC with signature SIG.
 * The call is added to the current bblock and returned.
 */
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for FUNC, going through its
 * managed wrapper. FUNC must have been registered with the icall machinery
 * (looked up by address).
 */
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the "address"; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can map it back to the
 * real target at patch time. fptr_is_patch marks the call accordingly.
 */
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without their managed wrapper).
 * Disabled under LLVM (non-32-bit address limitation on amd64), when SDB
 * sequence points are generated, or when explicitly disabled on the cfg.
 */
3009 direct_icalls_enabled (MonoCompile *cfg)
3011 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3013 if (cfg->compile_llvm)
3016 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. If the icall cannot raise
 * and direct icalls are enabled, its wrapper (a C call + exception check)
 * is inlined instead of called, avoiding wrapper overhead.
 */
3022 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3025 * Call the jit icall without a wrapper if possible.
3026 * The wrapper is needed for the following reasons:
3027 * - to handle exceptions thrown using mono_raise_exceptions () from the
3028 * icall function. The EH code needs the lmf frame pushed by the
3029 * wrapper to be able to unwind back to managed code.
3030 * - to be able to do stack walks for asynchronously suspended
3031 * threads when debugging.
3033 if (info->no_raise && direct_icalls_enabled (cfg)) {
3037 if (!info->wrapper_method) {
3038 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3039 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method to other threads before anyone reads it. */
3041 mono_memory_barrier ();
3045 * Inline the wrapper method, which is basically a call to the C icall, and
3046 * an exception check.
3048 costs = inline_method (cfg, info->wrapper_method, NULL,
3049 args, NULL, cfg->real_offset, TRUE);
3050 g_assert (costs > 0);
3051 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3055 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend the result INS of a call returning a small integer,
 * since native (pinvoke/LLVM) callees may leave the upper bits of the
 * return register uninitialized. Returns the widened instruction (or the
 * original when no widening is needed).
 */
3060 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3062 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3063 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3067 * Native code might return non register sized integers
3068 * without initializing the upper bits.
3070 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3071 case OP_LOADI1_MEMBASE:
3072 widen_op = OP_ICONV_TO_I1;
3074 case OP_LOADU1_MEMBASE:
3075 widen_op = OP_ICONV_TO_U1;
3077 case OP_LOADI2_MEMBASE:
3078 widen_op = OP_ICONV_TO_I2;
3080 case OP_LOADU2_MEMBASE:
3081 widen_op = OP_ICONV_TO_U2;
3087 if (widen_op != -1) {
3088 int dreg = alloc_preg (cfg);
3091 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3092 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the corlib String.memcpy (3-arg) helper.
 * Aborts with g_error if the method is missing (too-old corlib).
 */
3102 get_memcpy_method (void)
3104 static MonoMethod *memcpy_method = NULL;
3105 if (!memcpy_method) {
3106 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3108 g_error ("Old corlib found. Install a new one");
3110 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap (one bit per pointer-sized slot, starting at OFFSET) of
 * the reference fields of KLASS, recursing into embedded valuetypes with
 * references. Used to drive mono_gc_wbarrier_value_copy_bitmap.
 */
3114 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3116 MonoClassField *field;
3117 gpointer iter = NULL;
3119 while ((field = mono_class_get_fields (klass, &iter))) {
3122 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header; strip it. */
3124 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3125 if (mini_type_is_reference (mono_field_get_type (field))) {
3126 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3127 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3129 MonoClass *field_class = mono_class_from_mono_type (field->type);
3130 if (field_class->has_references)
3131 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 * order: the arch OP_CARD_TABLE_WBARRIER, inline card-table marking, then
 * a call to the generic write-barrier method. A dummy use keeps VALUE
 * alive across the barrier. No-op when write barriers are disabled.
 */
3137 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3139 int card_table_shift_bits;
3140 gpointer card_table_mask;
3142 MonoInst *dummy_use;
3143 int nursery_shift_bits;
3144 size_t nursery_size;
3146 if (!cfg->gen_write_barriers)
3149 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3151 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3153 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3156 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3157 wbarrier->sreg1 = ptr->dreg;
3158 wbarrier->sreg2 = value->dreg;
3159 MONO_ADD_INS (cfg->cbb, wbarrier);
3160 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
3161 int offset_reg = alloc_preg (cfg);
3162 int card_reg = alloc_preg (cfg);
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3166 if (card_table_mask)
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3169 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3170 * IMM's larger than 32bits.
3172 if (cfg->compile_aot) {
3173 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3175 MONO_INST_NEW (cfg, ins, OP_PCONST);
3176 ins->inst_p0 = card_table;
3177 ins->dreg = card_reg;
3178 MONO_ADD_INS (cfg->cbb, ins);
3181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3184 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3185 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3188 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 * KLASS (size SIZE, alignment ALIGN) from iargs[1] to iargs[0].
 * Falls back to the mono_gc_wbarrier_value_copy_bitmap icall for large
 * copies. NOTE(review): elided lines hide the early-bailout returns and
 * the function's boolean result convention — confirm against full source.
 */
3192 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3194 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3195 unsigned need_wb = 0;
3200 /*types with references can't have alignment smaller than sizeof(void*) */
3201 if (align < SIZEOF_VOID_P)
3204 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3205 if (size > 32 * SIZEOF_VOID_P)
3208 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3210 /* We don't unroll more than 5 stores to avoid code bloat. */
3211 if (size > 5 * SIZEOF_VOID_P) {
3212 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3213 size += (SIZEOF_VOID_P - 1);
3214 size &= ~(SIZEOF_VOID_P - 1);
3216 EMIT_NEW_ICONST (cfg, iargs [2], size);
3217 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3218 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3222 destreg = iargs [0]->dreg;
3223 srcreg = iargs [1]->dreg;
3226 dest_ptr_reg = alloc_preg (cfg);
3227 tmp_reg = alloc_preg (cfg);
3230 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copies, each followed by a barrier if needed. */
3232 while (size >= SIZEOF_VOID_P) {
3233 MonoInst *load_inst;
3234 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3235 load_inst->dreg = tmp_reg;
3236 load_inst->inst_basereg = srcreg;
3237 load_inst->inst_offset = offset;
3238 MONO_ADD_INS (cfg->cbb, load_inst);
3240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3243 emit_write_barrier (cfg, iargs [0], load_inst);
3245 offset += SIZEOF_VOID_P;
3246 size -= SIZEOF_VOID_P;
3249 /*tmp += sizeof (void*)*/
3250 if (size >= SIZEOF_VOID_P) {
3251 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3252 MONO_ADD_INS (cfg->cbb, iargs [0]);
3256 /* Those cannot be references since size < sizeof (void*) */
3258 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3282 * Emit code to copy a valuetype of type @klass whose address is stored in
3283 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native (marshalled) layout/size. Dispatch order:
 * write-barrier-aware copy (possibly an unrolled intrinsic, else a
 * value_copy icall), then plain inline memcpy for small sizes, then a call
 * to the corlib memcpy helper (indirect under gsharedvt).
 */
3286 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3288 MonoInst *iargs [4];
3291 MonoMethod *memcpy_method;
3292 MonoInst *size_ins = NULL;
3293 MonoInst *memcpy_ins = NULL;
3297 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3300 * This check breaks with spilled vars... need to handle it during verification anyway.
3301 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known via the rgctx. */
3304 if (mini_is_gsharedvt_klass (klass)) {
3306 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3307 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3311 n = mono_class_native_size (klass, &align);
3313 n = mono_class_value_size (klass, &align);
3315 /* if native is true there should be no references in the struct */
3316 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3317 /* Avoid barriers when storing to the stack */
3318 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3319 (dest->opcode == OP_LDADDR))) {
3325 context_used = mini_class_check_context_used (cfg, klass);
3327 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3328 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3330 } else if (context_used) {
3331 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3333 if (cfg->compile_aot) {
3334 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3336 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3337 mono_class_compute_gc_descriptor (klass);
3342 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3344 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3349 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3350 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3351 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3356 iargs [2] = size_ins;
3358 EMIT_NEW_ICONST (cfg, iargs [2], n);
3360 memcpy_method = get_memcpy_method ();
3362 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3364 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the corlib String.memset (3-arg) helper.
 * Aborts with g_error if the method is missing (too-old corlib).
 */
3369 get_memset_method (void)
3371 static MonoMethod *memset_method = NULL;
3372 if (!memset_method) {
3373 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3375 g_error ("Old corlib found. Install a new one");
3377 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of KLASS at DEST->dreg.
 * gsharedvt types go through an indirect bzero helper (size known only at
 * run time); small fixed sizes use an inline memset; otherwise the corlib
 * memset helper is called with value 0.
 */
3381 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3383 MonoInst *iargs [3];
3386 MonoMethod *memset_method;
3387 MonoInst *size_ins = NULL;
3388 MonoInst *bzero_ins = NULL;
3389 static MonoMethod *bzero_method;
3391 /* FIXME: Optimize this for the case when dest is an LDADDR */
3392 mono_class_init (klass);
3393 if (mini_is_gsharedvt_klass (klass)) {
3394 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3395 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3397 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3398 g_assert (bzero_method);
3400 iargs [1] = size_ins;
3401 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3405 n = mono_class_value_size (klass, &align);
3407 if (n <= sizeof (gpointer) * 8) {
3408 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3411 memset_method = get_memset_method ();
3413 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3414 EMIT_NEW_ICONST (cfg, iargs [2], n);
3415 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3422 * Emit IR to return either the this pointer for instance method,
3423 * or the mrgctx for static methods.
/*
 * The rgctx source depends on METHOD's kind (per CONTEXT_USED):
 * - method-inst context: load the mrgctx from the vtable var;
 * - static / valuetype methods: load the vtable var (possibly indirecting
 *   through the mrgctx's class_vtable);
 * - otherwise: load 'this' and read its vtable.
 * Requires cfg->gshared.
 */
3426 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3428 MonoInst *this_ins = NULL;
3430 g_assert (cfg->gshared);
3432 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3433 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3434 !method->klass->valuetype)
3435 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3437 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3438 MonoInst *mrgctx_loc, *mrgctx_var;
3440 g_assert (!this_ins);
3441 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3443 mrgctx_loc = mono_get_vtable_var (cfg);
3444 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3447 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3448 MonoInst *vtable_loc, *vtable_var;
3450 g_assert (!this_ins);
3452 vtable_loc = mono_get_vtable_var (cfg);
3453 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3455 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3456 MonoInst *mrgctx_var = vtable_var;
/* The var holds an mrgctx; fetch the vtable out of it. */
3459 vtable_reg = alloc_preg (cfg);
3460 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3461 vtable_var->type = STACK_PTR;
3469 vtable_reg = alloc_preg (cfg);
3470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry descriptor: which METHOD's rgctx to
 * consult (mrgctx vs class rgctx per IN_MRGCTX), the patch describing the
 * data being looked up, and the kind of info requested.
 */
3475 static MonoJumpInfoRgctxEntry *
3476 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3478 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3479 res->method = method;
3480 res->in_mrgctx = in_mrgctx;
3481 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3482 res->data->type = patch_type;
3483 res->data->data.target = patch_data;
3484 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline lookup of rgctx ENTRY from RGCTX. One path calls the
 * fill icalls directly (slot not a compile-time constant); the other emits
 * the fastpath: walk the rgctx array chain to the slot, null-checking each
 * level, and fall back to the fill icall in is_null_bb when the chain or
 * slot is not yet initialized. Result is phied through res_reg into end_bb.
 */
3489 static inline MonoInst*
3490 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3492 MonoInst *args [16];
3495 // FIXME: No fastpath since the slot is not a compile time constant
3497 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3498 if (entry->in_mrgctx)
3499 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3501 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3505 * FIXME: This can be called during decompose, which is a problem since it creates
3507 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3509 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3511 MonoBasicBlock *is_null_bb, *end_bb;
3512 MonoInst *res, *ins, *call;
3515 slot = mini_get_rgctx_entry_slot (entry);
3517 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3518 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots are offset past the fixed mrgctx header. */
3520 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find which level of the growing rgctx array chain holds the slot. */
3521 for (depth = 0; ; ++depth) {
3522 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3524 if (index < size - 1)
3529 NEW_BBLOCK (cfg, end_bb);
3530 NEW_BBLOCK (cfg, is_null_bb);
3533 rgctx_reg = rgctx->dreg;
3535 rgctx_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3538 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3539 NEW_BBLOCK (cfg, is_null_bb);
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3545 for (i = 0; i < depth; ++i) {
3546 int array_reg = alloc_preg (cfg);
3548 /* load ptr to next array */
3549 if (mrgctx && i == 0)
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT)
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3553 rgctx_reg = array_reg;
3554 /* is the ptr null? */
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3556 /* if yes, jump to actual trampoline */
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3561 val_reg = alloc_preg (cfg);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3563 /* is the slot null? */
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3565 /* if yes, jump to actual trampoline */
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3569 res_reg = alloc_preg (cfg);
3570 MONO_INST_NEW (cfg, ins, OP_MOVE);
3571 ins->dreg = res_reg;
3572 ins->sreg1 = val_reg;
3573 MONO_ADD_INS (cfg->cbb, ins);
3575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: slot empty, call the runtime to fill and return it. */
3578 MONO_START_BB (cfg, is_null_bb);
3580 EMIT_NEW_ICONST (cfg, args [1], index);
3582 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3584 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3585 MONO_INST_NEW (cfg, ins, OP_MOVE);
3586 ins->dreg = res_reg;
3587 ins->sreg1 = call->dreg;
3588 MONO_ADD_INS (cfg->cbb, ins);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3591 MONO_START_BB (cfg, end_bb);
3600 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Either inlines the lookup (emit_rgctx_fetch_inline) or calls the lazy
 * fetch trampoline through an abs patch, depending on the elided condition.
 */
3603 static inline MonoInst*
3604 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3607 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3609 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching RGCTX_TYPE info for KLASS from the current method's
 * rgctx (mrgctx when CONTEXT_USED has the method bit set).
 */
3613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3614 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3616 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3617 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3619 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR fetching RGCTX_TYPE info for signature SIG from the current
 * method's rgctx. Same pattern as emit_get_rgctx_klass.
 */
3623 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3624 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3626 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3627 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3629 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the rgctx info for a gsharedvt call described by SIG/CMETHOD.
 * The (sig, method) pair is stored in a mempool-allocated MonoJumpInfoGSharedVtCall. */
3633 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3634 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3636 MonoJumpInfoGSharedVtCall *call_info;
3637 MonoJumpInfoRgctxEntry *entry;
3640 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3641 call_info->sig = sig;
3642 call_info->method = cmethod;
3644 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3645 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3647 return emit_rgctx_fetch (cfg, rgctx, entry);
3651 * emit_get_rgctx_virt_method:
3653 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* Emit IR to load rgctx data for virtual method VIRT_METHOD on a receiver of type KLASS
 * (patch kind MONO_PATCH_INFO_VIRT_METHOD; the pair lives in a mempool MonoJumpInfoVirtMethod). */
3656 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3657 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3659 MonoJumpInfoVirtMethod *info;
3660 MonoJumpInfoRgctxEntry *entry;
3663 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3664 info->klass = klass;
3665 info->method = virt_method;
3667 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3668 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3670 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for CMETHOD, carrying INFO
 * (patch kind MONO_PATCH_INFO_GSHAREDVT_METHOD). */
3674 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3675 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3677 MonoJumpInfoRgctxEntry *entry;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 * emit_get_rgctx_method:
3689 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3690 * normal constants, else emit a load from the rgctx.
3693 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3694 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* With no generic context in use, the value is a compile-time constant: emit it directly. */
3696 if (!context_used) {
3699 switch (rgctx_type) {
3700 case MONO_RGCTX_INFO_METHOD:
3701 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3703 case MONO_RGCTX_INFO_METHOD_RGCTX:
3704 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not expected on the constant path. */
3707 g_assert_not_reached ();
/* Shared code: load the value through the rgctx (patch kind MONO_PATCH_INFO_METHODCONST). */
3710 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3711 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the rgctx info of type RGCTX_TYPE for FIELD (patch kind MONO_PATCH_INFO_FIELD). */
3718 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3719 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3721 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3722 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3724 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Return the slot index in cfg->gsharedvt_info->entries for (DATA, RGCTX_TYPE),
 * reusing an existing matching slot when possible and appending a new one otherwise. */
3728 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3730 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3731 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry; MONO_RGCTX_INFO_LOCAL_OFFSET entries are never shared. */
3736 for (i = 0; i < info->num_entries; ++i) {
3737 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3739 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). The old array is left in the
 * mempool; only the new copy is referenced afterwards. */
3743 if (info->num_entries == info->count_entries) {
3744 MonoRuntimeGenericContextInfoTemplate *new_entries;
3745 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3747 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3749 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3750 info->entries = new_entries;
3751 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index (return elided here). */
3754 idx = info->num_entries;
3755 template = &info->entries [idx];
3756 template->info_type = rgctx_type;
3757 template->data = data;
3759 info->num_entries ++;
3765 * emit_get_gsharedvt_info:
3767 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/* Like emit_get_rgctx_*, but reads the value out of the method's gsharedvt info variable
 * instead of going through an rgctx fetch trampoline. */
3770 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3775 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3776 /* Load info->entries [idx] */
3777 dreg = alloc_preg (cfg);
3778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3784 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3786 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3790 * On return the caller must check @klass for load errors.
/* Emit IR that runs the class initializer (.cctor) for KLASS if it has not run yet.
 * The vtable is obtained through the rgctx in shared code, or as a constant otherwise. */
3793 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3795 MonoInst *vtable_arg;
3798 context_used = mini_class_check_context_used (cfg, klass);
3801 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3802 klass, MONO_RGCTX_INFO_VTABLE);
3804 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3808 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Preferred path: a single backend opcode that hides the icall. */
3811 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3815 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3816 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3818 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3819 ins->sreg1 = vtable_arg->dreg;
3820 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test the MonoVTable 'initialized' bitfield inline and call
 * mono_generic_class_init only when the bit is clear. */
3822 static int byte_offset = -1;
3823 static guint8 bitmask;
3824 int bits_reg, inited_reg;
3825 MonoBasicBlock *inited_bb;
3826 MonoInst *args [16];
/* Locate the 'initialized' bit once; cached in function-local statics. */
3828 if (byte_offset < 0)
3829 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3831 bits_reg = alloc_ireg (cfg);
3832 inited_reg = alloc_ireg (cfg);
3834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3837 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall. */
3839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3842 args [0] = vtable_arg;
3843 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3845 MONO_START_BB (cfg, inited_bb);
/* Emit a sequence point at IL offset IP for debugger support; only done when sequence
 * points are enabled and we are compiling METHOD itself (not an inlinee). */
3850 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3854 if (cfg->gen_seq_points && cfg->method == method) {
3855 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3857 ins->flags |= MONO_INST_NONEMPTY_STACK;
3858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (from OBJ_REG's vtable) and the
 * target KLASS into the JIT TLS data so a failed cast can report both types. The null test
 * on OBJ_REG skips recording for null objects. No-op unless better_cast_details is set.
 */
3863 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3865 if (mini_get_debug_options ()->better_cast_details) {
3866 int vtable_reg = alloc_preg (cfg);
3867 int klass_reg = alloc_preg (cfg);
3868 MonoBasicBlock *is_null_bb = NULL;
3870 int to_klass_reg, context_used;
3873 NEW_BBLOCK (cfg, is_null_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3879 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed: the message ended "\n." which printed a stray '.' on its own line. */
3881 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3885 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from := klass of the object being cast. */
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3891 context_used = mini_class_check_context_used (cfg, klass);
3893 MonoInst *class_ins;
/* class_cast_to := target class; via rgctx in shared code, constant otherwise. */
3895 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3896 to_klass_reg = class_ins->dreg;
3898 to_klass_reg = alloc_preg (cfg);
3899 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3904 MONO_START_BB (cfg, is_null_bb);
/* Clear the recorded cast details in JIT TLS; clearing class_cast_from alone is sufficient
 * to mark the record invalid. No-op unless --debug=casts is active. */
3909 reset_cast_details (MonoCompile *cfg)
3911 /* Reset the variables holding the cast details */
3912 if (mini_get_debug_options ()->better_cast_details) {
3913 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3915 MONO_ADD_INS (cfg->cbb, tls_get);
3916 /* It is enough to reset the from field */
3917 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3922 * On return the caller must check @array_class for load errors
/* Emit IR that throws ArrayTypeMismatchException unless OBJ's runtime type is exactly
 * ARRAY_CLASS. Depending on sharing/AOT mode the comparison is done on the MonoClass
 * or directly on the vtable pointer. */
3925 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3927 int vtable_reg = alloc_preg (cfg);
3930 context_used = mini_class_check_context_used (cfg, array_class);
3932 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3934 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare MonoClass pointers (constant or AOT class const). */
3936 if (cfg->opt & MONO_OPT_SHARED) {
3937 int class_reg = alloc_preg (cfg);
3938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3939 if (cfg->compile_aot) {
3940 int klass_reg = alloc_preg (cfg);
3941 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: vtable comes from the rgctx. */
3946 } else if (context_used) {
3947 MonoInst *vtable_ins;
3949 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Non-shared: compare against the concrete vtable (const in AOT, immediate in JIT). */
3952 if (cfg->compile_aot) {
3956 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3958 vt_reg = alloc_preg (cfg);
3959 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3963 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3971 reset_cast_details (cfg);
3975 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3976 * generic code is generated.
/* Unbox a Nullable<T> by calling its Unbox method; in shared generic code the method
 * address is fetched through the rgctx and called indirectly. */
3979 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3981 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3984 MonoInst *rgctx, *addr;
3986 /* FIXME: What if the class is shared? We might not
3987 have to get the address of the method from the
3989 addr = emit_get_rgctx_method (cfg, context_used, method,
3990 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3992 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3994 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable when the method requires it. */
3996 gboolean pass_vtable, pass_mrgctx;
3997 MonoInst *rgctx_arg = NULL;
3999 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4000 g_assert (!pass_mrgctx);
4003 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4006 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4009 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit IR to unbox sp[0] to KLASS: verify the runtime type (throwing InvalidCastException
 * on mismatch) and return the address of the value payload (obj + sizeof (MonoObject)). */
4014 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4018 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4019 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4020 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4021 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4023 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4024 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4025 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4027 /* FIXME: generics */
4028 g_assert (klass->rank == 0);
/* Object must not be an array. */
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare element classes via an rgctx-loaded class. */
4038 MonoInst *element_class;
4040 /* This assertion is from the unboxcast insn */
4041 g_assert (klass->rank == 0);
4043 element_class = emit_get_rgctx_klass (cfg, context_used,
4044 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4046 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4047 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: direct class check against klass->element_class. */
4049 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4050 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4051 reset_cast_details (cfg);
/* Result: managed pointer to the unboxed payload. */
4054 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4055 MONO_ADD_INS (cfg->cbb, add);
4056 add->type = STACK_MP;
/* Unbox OBJ when KLASS is only known at runtime (gsharedvt): branch on the runtime
 * CLASS_BOX_TYPE (1 = reference type, 2 = nullable, otherwise vtype) and produce the
 * value loaded from a uniform address in ADDR_REG. */
4063 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4065 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4066 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4070 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime cast check via icall; returns obj on success. */
4076 args [1] = klass_inst;
4079 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4081 NEW_BBLOCK (cfg, is_ref_bb);
4082 NEW_BBLOCK (cfg, is_nullable_bb);
4083 NEW_BBLOCK (cfg, end_bb);
4084 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4091 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4092 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: address is the boxed payload (obj + sizeof (MonoObject)). */
4096 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4097 MONO_ADD_INS (cfg->cbb, addr);
4099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference-type path. */
4102 MONO_START_BB (cfg, is_ref_bb);
4104 /* Save the ref to a temporary */
4105 dreg = alloc_ireg (cfg);
4106 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4107 addr->dreg = addr_reg;
4108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: call Nullable unbox helper through a hand-built signature, since the
 * concrete Nullable<T>.Unbox cannot be constructed at JIT time here. */
4112 MONO_START_BB (cfg, is_nullable_bb);
4115 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4116 MonoInst *unbox_call;
4117 MonoMethodSignature *unbox_sig;
4119 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4120 unbox_sig->ret = &klass->byval_arg;
4121 unbox_sig->param_count = 1;
4122 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4123 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4125 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4126 addr->dreg = addr_reg;
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Join: load the value of type klass from the uniform address. */
4132 MONO_START_BB (cfg, end_bb);
4135 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4141 * Returns NULL and set the cfg exception on error.
/* Emit IR to allocate an instance of KLASS (optionally as the target of a box).
 * Chooses between managed allocators, domain-shared allocation, AOT-friendly helpers
 * and the generic icalls. Returns NULL and sets the cfg exception on error. */
4144 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4146 MonoInst *iargs [2];
/* NOTE(review): this inner iargs shadows the outer declaration above; left untouched
 * since the elided control flow between them is not visible here. */
4152 MonoInst *iargs [2];
4153 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4155 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Shared-code path: fetch the klass or vtable from the rgctx. */
4157 if (cfg->opt & MONO_OPT_SHARED)
4158 rgctx_info = MONO_RGCTX_INFO_KLASS;
4160 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4161 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4163 if (cfg->opt & MONO_OPT_SHARED) {
4164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4166 alloc_ftn = mono_object_new;
4169 alloc_ftn = mono_object_new_specific;
/* GC managed allocator (inlined allocation) when the instance size is known. */
4172 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4173 if (known_instance_size) {
4174 int size = mono_class_instance_size (klass);
4175 if (size < sizeof (MonoObject))
4176 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4178 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4180 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4183 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4186 if (cfg->opt & MONO_OPT_SHARED) {
4187 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4190 alloc_ftn = mono_object_new;
4191 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4192 /* This happens often in argument checking code, eg. throw new FooException... */
4193 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4194 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4195 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4197 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4198 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> surface a TypeLoadException. */
4202 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4203 cfg->exception_ptr = klass;
4207 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4209 if (managed_alloc) {
4210 int size = mono_class_instance_size (klass);
4211 if (size < sizeof (MonoObject))
4212 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4214 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4215 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4216 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Generic allocation function; pass_lw selects the (size-in-words, vtable) calling form. */
4218 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4220 guint32 lw = vtable->klass->instance_size;
4221 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4222 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4223 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4226 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4230 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4234 * Returns NULL and set the cfg exception on error.
/* Emit IR to box VAL of type KLASS. Handles Nullable<T> (calls its Box method),
 * gsharedvt classes (runtime branch on box type), and the plain alloc + store case.
 * Returns NULL and sets the cfg exception on error. */
4237 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4239 MonoInst *alloc, *ins;
/* Nullable<T>: boxing is implemented by Nullable<T>.Box. */
4241 if (mono_class_is_nullable (klass)) {
4242 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4245 /* FIXME: What if the class is shared? We might not
4246 have to get the method address from the RGCTX. */
4247 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4248 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4249 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4251 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4253 gboolean pass_vtable, pass_mrgctx;
4254 MonoInst *rgctx_arg = NULL;
4256 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4257 g_assert (!pass_mrgctx);
4260 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4263 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4266 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box type only known at runtime; 1 = reference, 2 = nullable, else vtype. */
4270 if (mini_is_gsharedvt_klass (klass)) {
4271 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4272 MonoInst *res, *is_ref, *src_var, *addr;
4275 dreg = alloc_ireg (cfg);
4277 NEW_BBLOCK (cfg, is_ref_bb);
4278 NEW_BBLOCK (cfg, is_nullable_bb);
4279 NEW_BBLOCK (cfg, end_bb);
4280 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and store the value into the payload. */
4288 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4291 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4292 ins->opcode = OP_STOREV_MEMBASE;
4294 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4295 res->type = STACK_OBJ;
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference path: boxing a ref is the identity; load it back through its address. */
4300 MONO_START_BB (cfg, is_ref_bb);
4302 /* val is a vtype, so has to load the value manually */
4303 src_var = get_vreg_to_inst (cfg, val->dreg);
4305 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4306 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: call Nullable<T>.Box through a hand-built gsharedvt signature. */
4311 MONO_START_BB (cfg, is_nullable_bb);
4314 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4315 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4317 MonoMethodSignature *box_sig;
4320 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4321 * construct that method at JIT time, so have to do things by hand.
4323 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4324 box_sig->ret = &mono_defaults.object_class->byval_arg;
4325 box_sig->param_count = 1;
4326 box_sig->params [0] = &klass->byval_arg;
4327 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4328 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4329 res->type = STACK_OBJ;
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4335 MONO_START_BB (cfg, end_bb);
/* Plain case: allocate and store the vtype into the boxed payload. */
4339 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4343 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Return whether KLASS has a (co/contra)variant generic parameter instantiated with a
 * reference type — such casts need the runtime cast-with-cache helpers. */
4349 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4352 MonoGenericContainer *container;
4353 MonoGenericInst *ginst;
/* Pick the generic container + instantiation; open definitions only count in shared code. */
4355 if (klass->generic_class) {
4356 container = klass->generic_class->container_class->generic_container;
4357 ginst = klass->generic_class->context.class_inst;
4358 } else if (klass->generic_container && context_used) {
4359 container = klass->generic_container;
4360 ginst = container->context.class_inst;
/* Any variant parameter bound to a reference type triggers the slow cast path. */
4365 for (i = 0; i < container->type_argc; ++i) {
4367 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4369 type = ginst->type_argv [i];
4370 if (mini_type_is_reference (type))
/* Whitelist of corlib class names whose icalls never (transitively) raise; built lazily. */
4376 static GHashTable* direct_icall_type_hash;
/* Return whether CMETHOD's icall may be called directly (skipping the wrapper). */
4379 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4381 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4382 if (!direct_icalls_enabled (cfg))
4386 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4387 * Whitelist a few icalls for now.
/* Lazy one-time init; the barrier publishes the fully built table before the
 * global pointer becomes visible to other threads. */
4389 if (!direct_icall_type_hash) {
4390 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4392 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4393 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4394 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4395 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4396 mono_memory_barrier ();
4397 direct_icall_type_hash = h;
4400 if (cmethod->klass == mono_defaults.math_class)
4402 /* No locking needed */
4403 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be a simple class-hierarchy walk:
 * interfaces, arrays, nullables, MarshalByRef, sealed classes and generic type variables
 * all need a more involved check. NOTE: evaluates 'klass' multiple times. */
4408 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the castclass-with-cache wrapper for KLASS; ARGS are the wrapper's
 * arguments (obj, klass, cache slot). Cast details are recorded around the call. */
4411 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4413 MonoMethod *mono_castclass;
4416 mono_castclass = mono_marshal_get_castclass_with_cache ();
4418 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4419 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4420 reset_cast_details (cfg);
4426 get_castclass_cache_idx (MonoCompile *cfg)
4428 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4429 cfg->castclass_cache_index ++;
/* Pack (method index, per-method counter) into one 32-bit id; counter in the low 16 bits. */
4430 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/* Non-shared castclass of OBJ to KLASS through the cached wrapper. The cache slot is a
 * per-call-site pointer: an AOT patch in AOT mode, a domain-allocated slot under the JIT. */
4434 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4443 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4446 if (cfg->compile_aot) {
4447 idx = get_castclass_cache_idx (cfg);
4448 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4450 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4453 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4454 return emit_castclass_with_cache (cfg, klass, args)
4458 * Returns NULL and set the cfg exception on error.
/* Emit IR for CIL castclass of SRC to KLASS: null passes through; otherwise verify the
 * runtime type, throwing InvalidCastException on mismatch. Several strategies are used
 * depending on sharing mode and how complex the target class is. Returns NULL and sets
 * the cfg exception on error. */
4461 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4463 MonoBasicBlock *is_null_bb;
4464 int obj_reg = src->dreg;
4465 int vtable_reg = alloc_preg (cfg);
4467 MonoInst *klass_inst = NULL, *res;
4469 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments need the runtime cache helper even in non-shared code. */
4471 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4472 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4473 (*inline_costs) += 2;
/* MarshalByRef / interface targets: inline the castclass wrapper. */
4475 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4476 MonoMethod *mono_castclass;
4477 MonoInst *iargs [1];
4480 mono_castclass = mono_marshal_get_castclass (klass);
4483 save_cast_details (cfg, klass, src->dreg, TRUE);
4484 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4485 iargs, ip, cfg->real_offset, TRUE);
4486 reset_cast_details (cfg);
4487 CHECK_CFG_EXCEPTION;
4488 g_assert (costs > 0);
4490 cfg->real_offset += 5;
4492 (*inline_costs) += costs;
/* Shared-code path (elided branch above): complex targets go through the cache helper,
 * with the cache entry and klass loaded via the rgctx. */
4500 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4501 MonoInst *cache_ins;
4503 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4508 /* klass - it's the second element of the cache entry*/
4509 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4512 args [2] = cache_ins;
4514 return emit_castclass_with_cache (cfg, klass, args);
4517 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline cast: null objects always succeed. */
4520 NEW_BBLOCK (cfg, is_null_bb);
4522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4525 save_cast_details (cfg, klass, obj_reg, FALSE);
4527 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4529 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4531 int klass_reg = alloc_preg (cfg);
4533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes could be checked with a single vtable compare, but the
 * vtable-compare variant is disabled (see the FIXME below); the class compare is used. */
4535 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4536 /* the remoting code is broken, access the class for now */
4537 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4538 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4541 cfg->exception_ptr = klass;
4544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4549 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4556 MONO_START_BB (cfg, is_null_bb);
4558 reset_cast_details (cfg);
4567 * Returns NULL and set the cfg exception on error.
4570 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4573 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4574 int obj_reg = src->dreg;
4575 int vtable_reg = alloc_preg (cfg);
4576 int res_reg = alloc_ireg_ref (cfg);
4577 MonoInst *klass_inst = NULL;
4582 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4583 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4584 MonoInst *cache_ins;
4586 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4591 /* klass - it's the second element of the cache entry*/
4592 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4595 args [2] = cache_ins;
4597 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4600 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4603 NEW_BBLOCK (cfg, is_null_bb);
4604 NEW_BBLOCK (cfg, false_bb);
4605 NEW_BBLOCK (cfg, end_bb);
4607 /* Do the assignment at the beginning, so the other assignment can be if converted */
4608 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4609 ins->type = STACK_OBJ;
4612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4613 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4617 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4618 g_assert (!context_used);
4619 /* the is_null_bb target simply copies the input register to the output */
4620 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4622 int klass_reg = alloc_preg (cfg);
4625 int rank_reg = alloc_preg (cfg);
4626 int eclass_reg = alloc_preg (cfg);
4628 g_assert (!context_used);
4629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4634 if (klass->cast_class == mono_defaults.object_class) {
4635 int parent_reg = alloc_preg (cfg);
4636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4637 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4638 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4640 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4641 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4642 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4644 } else if (klass->cast_class == mono_defaults.enum_class) {
4645 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4647 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4648 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4650 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4651 /* Check that the object is a vector too */
4652 int bounds_reg = alloc_preg (cfg);
4653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4658 /* the is_null_bb target simply copies the input register to the output */
4659 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4661 } else if (mono_class_is_nullable (klass)) {
4662 g_assert (!context_used);
4663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4667 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4668 g_assert (!context_used);
4669 /* the remoting code is broken, access the class for now */
4670 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4671 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4673 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4674 cfg->exception_ptr = klass;
4677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4686 /* the is_null_bb target simply copies the input register to the output */
4687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4692 MONO_START_BB (cfg, false_bb);
4694 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4697 MONO_START_BB (cfg, is_null_bb);
4699 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CISINST opcode: given an object reference SRC and a class
 * KLASS, produce an integer result in a fresh ireg (0/1/2 encoding described
 * in the comment below).  Remoting builds add extra basic blocks to classify
 * transparent proxies whose type cannot be decided at JIT time.
 * NOTE(review): gaps in the embedded line numbering indicate elided source
 * lines (e.g. the #else/#endif partners of the #ifndef blocks, and the
 * declaration of `ins`/`tmp_reg`); comments describe only the visible code.
 */
4705 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4707 /* This opcode takes as input an object reference and a class, and returns:
4708 0) if the object is an instance of the class,
4709 1) if the object is not instance of the class,
4710 2) if the object is a proxy whose type cannot be determined */
4713 #ifndef DISABLE_REMOTING
4714 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4716 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4718 int obj_reg = src->dreg;
4719 int dreg = alloc_ireg (cfg);
4721 #ifndef DISABLE_REMOTING
4722 int klass_reg = alloc_preg (cfg);
4725 NEW_BBLOCK (cfg, true_bb);
4726 NEW_BBLOCK (cfg, false_bb);
4727 NEW_BBLOCK (cfg, end_bb);
4728 #ifndef DISABLE_REMOTING
4729 NEW_BBLOCK (cfg, false2_bb);
4730 NEW_BBLOCK (cfg, no_proxy_bb);
/* A NULL object is "not an instance": branch straight to the result-1 block. */
4733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: the actual interface test is delegated to mini_emit_iface_cast. */
4736 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4737 #ifndef DISABLE_REMOTING
4738 NEW_BBLOCK (cfg, interface_fail_bb);
4741 tmp_reg = alloc_preg (cfg);
4742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4743 #ifndef DISABLE_REMOTING
4744 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: result is 1 unless the object is a transparent proxy. */
4745 MONO_START_BB (cfg, interface_fail_bb);
4746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4748 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: its type cannot be determined -> result 2. */
4750 tmp_reg = alloc_preg (cfg);
4751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4755 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case (remoting build): detect transparent proxies first. */
4758 #ifndef DISABLE_REMOTING
4759 tmp_reg = alloc_preg (cfg);
4760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4763 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class instead. */
4764 tmp_reg = alloc_preg (cfg);
4765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4768 tmp_reg = alloc_preg (cfg);
4769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4773 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
/* Not a proxy: ordinary isinst check on the object's class. */
4776 MONO_START_BB (cfg, no_proxy_bb);
4778 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4780 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: result 1 (object is not an instance of klass). */
4784 MONO_START_BB (cfg, false_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb: result 2 (proxy whose type cannot be determined). */
4789 #ifndef DISABLE_REMOTING
4790 MONO_START_BB (cfg, false2_bb);
4792 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4793 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: result 0 (object is an instance of klass). */
4796 MONO_START_BB (cfg, true_bb);
4798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4800 MONO_START_BB (cfg, end_bb);
/* NOTE(review): `ins` appears to be declared in lines elided from this excerpt. */
4803 MONO_INST_NEW (cfg, ins, OP_ICONST);
4805 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CCASTCLASS opcode (castclass with remoting awareness):
 * result 0 when the object is an instance of KLASS, 1 when it is a proxy of
 * undeterminable type; otherwise an InvalidCastException is raised at runtime.
 * NOTE(review): gaps in the embedded line numbering indicate elided source
 * lines (missing #else/#endif partners, braces and the `ins` declaration).
 */
4811 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4813 /* This opcode takes as input an object reference and a class, and returns:
4814 0) if the object is an instance of the class,
4815 1) if the object is a proxy whose type cannot be determined
4816 an InvalidCastException exception is thrown otherwise */
4819 #ifndef DISABLE_REMOTING
4820 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4822 MonoBasicBlock *ok_result_bb;
4824 int obj_reg = src->dreg;
4825 int dreg = alloc_ireg (cfg);
4826 int tmp_reg = alloc_preg (cfg);
4828 #ifndef DISABLE_REMOTING
4829 int klass_reg = alloc_preg (cfg);
4830 NEW_BBLOCK (cfg, end_bb);
4833 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting NULL always succeeds: go straight to the result-0 block. */
4835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a runtime failure can produce a useful message. */
4838 save_cast_details (cfg, klass, obj_reg, FALSE);
4840 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4841 #ifndef DISABLE_REMOTING
4842 NEW_BBLOCK (cfg, interface_fail_bb);
4844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4845 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy may still pass (as result 1). */
4846 MONO_START_BB (cfg, interface_fail_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4849 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot be verified -> throw. */
4851 tmp_reg = alloc_preg (cfg);
4852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4854 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4856 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-remoting build: a plain interface cast that throws on failure. */
4859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4860 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4864 #ifndef DISABLE_REMOTING
4865 NEW_BBLOCK (cfg, no_proxy_bb);
4867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4869 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class instead. */
4871 tmp_reg = alloc_preg (cfg);
4872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4873 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4875 tmp_reg = alloc_preg (cfg);
4876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4880 NEW_BBLOCK (cfg, fail_1_bb);
4882 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type cannot be determined -> result 1, no throw. */
4884 MONO_START_BB (cfg, fail_1_bb);
4886 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Not a proxy: ordinary castclass, throws on failure. */
4889 MONO_START_BB (cfg, no_proxy_bb);
4891 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4893 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> result 0. */
4897 MONO_START_BB (cfg, ok_result_bb);
4899 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4901 #ifndef DISABLE_REMOTING
4902 MONO_START_BB (cfg, end_bb);
/* NOTE(review): `ins` appears to be declared in lines elided from this excerpt. */
4906 MONO_INST_NEW (cfg, ins, OP_ICONST);
4908 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inlined IR for Enum.HasFlag (): load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG and compare the result back against ENUM_FLAG,
 * yielding a STACK_I4 boolean.
 * NOTE(review): `is_i4` is presumably set in the switch over the underlying
 * enum type, most of which is elided from this excerpt — confirm against the
 * full source.
 */
4913 static G_GNUC_UNUSED MonoInst*
4914 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4916 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4917 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4920 switch (enum_type->type) {
4923 #if SIZEOF_REGISTER == 8
4935 MonoInst *load, *and, *cmp, *ceq;
4936 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4937 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4938 int dest_reg = alloc_ireg (cfg);
/* (this & flag) == flag  <=>  every requested flag bit is set. */
4940 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4941 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4942 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4943 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4945 ceq->type = STACK_I4;
/* Lower the composite opcodes right away via mono_decompose_opcode. */
4948 load = mono_decompose_opcode (cfg, load);
4949 and = mono_decompose_opcode (cfg, and);
4950 cmp = mono_decompose_opcode (cfg, cmp);
4951 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR constructing a delegate of class KLASS bound to METHOD with the
 * given TARGET object, inlining the work of mono_delegate_ctor (): allocate
 * the delegate, fill in its target/method/method_code/invoke_impl fields and
 * set up the (possibly virtual) invoke trampoline.  CONTEXT_USED is non-zero
 * for generic-sharing contexts.
 * NOTE(review): gaps in the embedded line numbering indicate elided lines
 * (declarations of `dreg`, `ptr`, `domain`, `code_slot`, some braces and
 * return statements); comments describe only the visible code.
 */
4959 * Returns NULL and sets the cfg exception on error.
4961 static G_GNUC_UNUSED MonoInst*
4962 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4966 gpointer trampoline;
4967 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out early if no virtual-invoke implementation exists for this signature. */
4971 if (virtual && !cfg->llvm_only) {
4972 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4975 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4979 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4983 if (cfg->llvm_only) {
4984 MonoInst *args [16];
4987 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4988 * the address of a gshared method. So use a JIT icall.
4989 * FIXME: Optimize this.
4993 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD)_;
4994 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4999 /* Inline the contents of mono_delegate_ctor */
5001 /* Set target field */
5002 /* Optimize away setting of NULL target */
5003 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store writes a GC reference into the heap -> needs a write barrier. */
5005 if (cfg->gen_write_barriers) {
5006 dreg = alloc_preg (cfg);
5007 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5008 emit_write_barrier (cfg, ptr, target);
5012 /* Set method field */
5013 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5014 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5017 * To avoid looking up the compiled code belonging to the target method
5018 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5019 * store it, and we fill it after the method has been compiled.
5021 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5022 MonoInst *code_slot_ins;
5025 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
5027 domain = mono_domain_get ();
5028 mono_domain_lock (domain);
5029 if (!domain_jit_info (domain)->method_code_hash)
5030 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5031 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5033 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5034 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5036 mono_domain_unlock (domain);
5038 if (cfg->compile_aot)
5039 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5041 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable constant so the trampoline is resolved at load time. */
5046 if (cfg->compile_aot) {
5047 MonoDelegateClassMethodPair *del_tramp;
5049 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5050 del_tramp->klass = klass;
5051 del_tramp->method = context_used ? NULL : method;
5052 del_tramp->is_virtual = virtual;
5053 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5056 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5058 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5059 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5062 /* Set invoke_impl field */
5064 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT: tramp_ins points at a MonoDelegateTrampInfo; copy its fields over. */
5066 dreg = alloc_preg (cfg);
5067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5070 dreg = alloc_preg (cfg);
5071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5072 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5075 dreg = alloc_preg (cfg);
5076 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5079 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall for
 * creating a multi-dimensional array; SP holds the dimension arguments.
 * The vararg calling convention forces LLVM compilation off for this method.
 */
5085 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5087 MonoJitICallInfo *info;
5089 /* Need to register the icall so it gets an icall wrapper */
5090 info = mono_get_array_new_va_icall (rank);
5092 cfg->flags |= MONO_CFG_HAS_VARARGS;
5094 /* mono_array_new_va () needs a vararg calling convention */
5095 cfg->disable_llvm = TRUE;
5097 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5102 * handle_constrained_gsharedvt_call:
5104 * Handle constrained calls where the receiver is a gsharedvt type.
5105 * Return the instruction representing the call. Set the cfg exception on failure.
5108 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5109 gboolean *ref_emit_widen)
5111 MonoInst *ins = NULL;
5112 gboolean emit_widen = *ref_emit_widen;
5115 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5116 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5117 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported; everything else falls through
 * to the GSHAREDVT_FAILURE path below. */
5119 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5120 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5121 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5122 MonoInst *args [16];
5125 * This case handles calls to
5126 * - object:ToString()/Equals()/GetHashCode(),
5127 * - System.IComparable<T>:CompareTo()
5128 * - System.IEquatable<T>:Equals ()
5129 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, via rgctx when it is generic-context dependent. */
5133 if (mono_method_check_context_used (cmethod))
5134 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5136 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5137 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5139 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5140 if (fsig->hasthis && fsig->param_count) {
5141 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5142 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5143 ins->dreg = alloc_preg (cfg);
5144 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5145 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus box-type info; otherwise pass the value. */
5148 if (mini_is_gsharedvt_type (fsig->params [0])) {
5151 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5153 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5154 addr_reg = ins->dreg;
5155 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5157 EMIT_NEW_ICONST (cfg, args [3], 0);
5158 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5161 EMIT_NEW_ICONST (cfg, args [3], 0);
5162 EMIT_NEW_ICONST (cfg, args [4], 0);
5164 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match the declared return type. */
5167 if (mini_is_gsharedvt_type (fsig->ret)) {
5168 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5169 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed payload, then load it. */
5173 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5174 MONO_ADD_INS (cfg->cbb, add);
5176 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5177 MONO_ADD_INS (cfg->cbb, ins);
5178 /* ins represents the call result */
5181 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5184 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 * basic block (once per method), and keep the variable alive with a dummy use
 * in the exit block.  No-op when there is no got_var or it was already set up.
 */
5193 mono_emit_load_got_addr (MonoCompile *cfg)
5195 MonoInst *getaddr, *dummy_use;
5197 if (!cfg->got_var || cfg->got_var_allocated)
5200 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5201 getaddr->cil_code = cfg->header->code;
5202 getaddr->dreg = cfg->got_var->dreg;
5204 /* Add it to the start of the first bblock */
5205 if (cfg->bb_entry->code) {
5206 getaddr->next = cfg->bb_entry->code;
5207 cfg->bb_entry->code = getaddr;
5210 MONO_ADD_INS (cfg->bb_entry, getaddr);
5212 cfg->got_var_allocated = TRUE;
5215 * Add a dummy use to keep the got_var alive, since real uses might
5216 * only be generated by the back ends.
5217 * Add it to end_bblock, so the variable's lifetime covers the whole
5219 * It would be better to make the usage of the got var explicit in all
5220 * cases when the backend needs it (i.e. calls, throw etc.), so this
5221 * wouldn't be needed.
5223 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5224 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size limit for inlining; initialized lazily from the MONO_INLINELIMIT
 * environment variable (falling back to INLINE_LENGTH_LIMIT) on first use. */
5227 static int inline_limit;
5228 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Checks inline depth, header-summary flags (runtime/icall/pinvoke,
 * noinlining, synchronized, marshal-by-ref), the IL size limit, class-cctor
 * initialization constraints and (on soft-float targets) R4 usage.
 * NOTE(review): the actual return statements are among the lines elided from
 * this excerpt; comments describe only the visible checks.
 */
5231 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5233 MonoMethodHeaderSummary header;
5235 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5236 MonoMethodSignature *sig = mono_method_signature (method);
5240 if (cfg->disable_inline)
/* Cap recursion of inlined bodies. */
5245 if (cfg->inline_depth > 10)
5248 if (!mono_method_get_header_summary (method, &header))
5251 /*runtime, icall and pinvoke are checked by summary call*/
5252 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5253 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5254 (mono_class_is_marshalbyref (method->klass)) ||
5258 /* also consider num_locals? */
5259 /* Do the size check early to avoid creating vtables */
5260 if (!inline_limit_inited) {
5261 if (g_getenv ("MONO_INLINELIMIT"))
5262 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))_;
5264 inline_limit = INLINE_LENGTH_LIMIT;
5265 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
5267 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5271 * if we can initialize the class of the method right away, we do,
5272 * otherwise we don't allow inlining if the class needs initialization,
5273 * since it would mean inserting a call to mono_runtime_class_init()
5274 * inside the inlined code
5276 if (!(cfg->opt & MONO_OPT_SHARED)) {
5277 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5278 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5279 vtable = mono_class_vtable (cfg->domain, method->klass);
5282 if (!cfg->compile_aot)
5283 mono_runtime_class_init (vtable);
5284 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5285 if (cfg->run_cctors && method->klass->has_cctor) {
5286 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5287 if (!method->klass->runtime_info)
5288 /* No vtable created yet */
5290 vtable = mono_class_vtable (cfg->domain, method->klass);
5293 /* This makes so that inline cannot trigger */
5294 /* .cctors: too many apps depend on them */
5295 /* running with a specific order... */
5296 if (! vtable->initialized)
5298 mono_runtime_class_init (vtable);
5300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5301 if (!method->klass->runtime_info)
5302 /* No vtable created yet */
5304 vtable = mono_class_vtable (cfg->domain, method->klass);
5307 if (!vtable->initialized)
5312 * If we're compiling for shared code
5313 * the cctor will need to be run at aot method load time, for example,
5314 * or at the end of the compilation of the inlining method.
5316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods touching R4 values. */
5320 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5321 if (mono_arch_is_soft_float ()) {
5323 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5325 for (i = 0; i < sig->param_count; ++i)
5326 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5331 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a (static) field access on KLASS from METHOD requires the
 * class constructor to run first.  JIT compilation can consult the live
 * vtable; AOT cannot.
 * NOTE(review): the return statements are among the lines elided from this
 * excerpt.
 */
5338 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5340 if (!cfg->compile_aot) {
5342 if (vtable->initialized)
5346 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5347 if (cfg->method == method)
5351 if (!mono_class_needs_cctor_run (klass, method))
5354 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5355 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of one-dimensional array
 * ARR with element class KLASS, with an optional bounds check (BCHECK).
 * On x86/amd64 a power-of-two element size uses a single LEA; otherwise the
 * offset is computed as index * element_size + offsetof (MonoArray, vector).
 * For gsharedvt variable klasses the element size comes from the rgctx.
 */
5362 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5366 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5369 if (mini_is_gsharedvt_variable_klass (klass)) {
5372 mono_class_init (klass);
5373 size = mono_class_array_element_size (klass);
5376 mult_reg = alloc_preg (cfg);
5377 array_reg = arr->dreg;
5378 index_reg = index->dreg;
5380 #if SIZEOF_REGISTER == 8
5381 /* The array reg is 64 bits but the index reg is only 32 */
5382 if (COMPILE_LLVM (cfg)) {
5384 index2_reg = index_reg;
5386 index2_reg = alloc_preg (cfg);
5387 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
5390 if (index->type == STACK_I8) {
5391 index2_reg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5394 index2_reg = index_reg;
5399 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: one LEA when the element size is a power of two. */
5401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5402 if (size == 1 || size == 2 || size == 4 || size == 8) {
5403 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5405 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5406 ins->klass = mono_class_get_element_class (klass);
5407 ins->type = STACK_MP;
5413 add_reg = alloc_ireg_mp (cfg);
/* Gsharedvt: element size is only known at runtime; fetch it from the rgctx. */
5416 MonoInst *rgctx_ins;
5419 g_assert (cfg->gshared);
5420 context_used = mini_class_check_context_used (cfg, klass);
5421 g_assert (context_used);
5422 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5423 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5427 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5428 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5429 ins->klass = mono_class_get_element_class (klass);
5430 ins->type = STACK_MP;
5431 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional array
 * with element class KLASS.  Both indices are range-checked against the
 * per-dimension MonoArrayBounds (lower_bound/length) and rebased to zero;
 * the flattened offset is (realidx1 * dim2_length + realidx2) * element_size
 * plus the MonoArray vector offset.
 */
5437 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5439 int bounds_reg = alloc_preg (cfg);
5440 int add_reg = alloc_ireg_mp (cfg);
5441 int mult_reg = alloc_preg (cfg);
5442 int mult2_reg = alloc_preg (cfg);
5443 int low1_reg = alloc_preg (cfg);
5444 int low2_reg = alloc_preg (cfg);
5445 int high1_reg = alloc_preg (cfg);
5446 int high2_reg = alloc_preg (cfg);
5447 int realidx1_reg = alloc_preg (cfg);
5448 int realidx2_reg = alloc_preg (cfg);
5449 int sum_reg = alloc_preg (cfg);
5450 int index1, index2, tmpreg;
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5457 index1 = index_ins1->dreg;
5458 index2 = index_ins2->dreg;
5460 #if SIZEOF_REGISTER == 8
5461 /* The array reg is 64 bits but the index reg is only 32 */
5462 if (COMPILE_LLVM (cfg)) {
5465 tmpreg = alloc_preg (cfg);
5466 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5468 tmpreg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5473 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5477 /* range checking */
5478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5479 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: rebase by lower_bound, then unsigned-compare against length. */
5481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5482 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5483 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5485 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5486 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5487 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
5489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5490 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5491 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5493 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5495 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * dim2_length + realidx2) * size + vector offset */
5497 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5498 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5501 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5503 ins->type = STACK_MP;
5505 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR for the Get/Set/Address helpers on multi-dimensional arrays:
 * rank 1 uses the fast one-dimensional path, rank 2 uses the inline
 * two-dimensional path when OP_LMUL is usable, and higher ranks fall back to
 * a call to the marshal-generated Address method.  IS_SET drops the trailing
 * value argument when computing the rank.
 */
5511 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5515 MonoMethod *addr_method;
5517 MonoClass *eclass = cmethod->klass->element_class;
5519 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5522 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5524 /* emit_ldelema_2 depends on OP_LMUL */
5525 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5526 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* General case: call the marshal-generated element-address helper. */
5529 if (mini_is_gsharedvt_variable_klass (eclass))
5532 element_size = mono_class_array_element_size (eclass);
5533 addr_method = mono_marshal_get_array_address (rank, element_size);
5534 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5539 static MonoBreakPolicy
5540 always_insert_breakpoint (MonoMethod *method)
5542 return MONO_BREAK_POLICY_ALWAYS;
5545 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5548 * mono_set_break_policy:
5549 * policy_callback: the new callback function
5551 * Allow embedders to decide whether to actually obey breakpoint instructions
5552 * (both break IL instructions and Debugger.Break () method calls), for example
5553 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5554 * untrusted or semi-trusted code.
5556 * @policy_callback will be called every time a break point instruction needs to
5557 * be inserted with the method argument being the method that calls Debugger.Break()
5558 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5559 * if it wants the breakpoint to not be effective in the given method.
5560 * #MONO_BREAK_POLICY_ALWAYS is the default.
5563 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
5565 if (policy_callback)
5566 break_policy_func = policy_callback;
5568 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint for METHOD should actually be emitted.
 * NOTE(review): the identifier misspells "breakpoint"; renaming would break
 * existing callers, so it is documented rather than fixed here.
 */
5572 should_insert_brekpoint (MonoMethod *method) {
5573 switch (break_policy_func (method)) {
5574 case MONO_BREAK_POLICY_ALWAYS:
5576 case MONO_BREAK_POLICY_NEVER:
5578 case MONO_BREAK_POLICY_ON_DBG:
5579 g_warning ("mdb no longer supported");
5582 g_warning ("Incorrect value returned from break policy callback");
5587 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inlined element copy for the Array Get/SetGenericValueImpl
 * icalls: compute the element address (bounds check already done by the
 * caller), then move the value between args [2] and the element.  Reference
 * stores into the array get a write barrier.
 */
5589 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5591 MonoInst *addr, *store, *load;
5592 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5594 /* the bounds check is already done by the callers */
5595 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the array element. */
5597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5598 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5599 if (mini_type_is_reference (fsig->params [2]))
5600 emit_write_barrier (cfg, addr, load);
/* Get: copy the array element into *args[2]. */
5602 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5603 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS behaves as a reference type (includes shared generic instances). */
5610 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5612 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem: store SP [2] into array SP [0] at index SP [1].
 * With SAFETY_CHECKS, reference-type stores (other than storing a known
 * NULL constant) go through the virtual stelemref helper so the array
 * covariance check is performed; value types and unchecked stores are
 * emitted inline.
 */
5616 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5618 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5619 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5620 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5621 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5622 MonoInst *iargs [3];
5625 mono_class_setup_vtable (obj_array);
5626 g_assert (helper->slot);
5628 if (sp [0]->type != STACK_OBJ)
5630 if (sp [2]->type != STACK_OBJ)
/* Virtual call through object[]'s vtable performs the covariance check. */
5637 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size only known at runtime, use the generic path. */
5641 if (mini_is_gsharedvt_variable_klass (klass)) {
5644 // FIXME-VT: OP_ICONST optimization
5645 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5646 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5647 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
5648 } else if (sp [1]->opcode == OP_ICONST) {
5649 int array_reg = sp [0]->dreg;
5650 int index_reg = sp [1]->dreg;
5651 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5654 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5655 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store. */
5657 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5659 if (generic_class_is_reference_type (cfg, klass))
5660 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Implement the Array.UnsafeStore/UnsafeLoad intrinsics: an element
 * access with no bounds or covariance checks. For IS_SET the element
 * type comes from FSIG->params [2], otherwise from FSIG->ret.
 */
5667 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5672 eklass = mono_class_from_mono_type (fsig->params [2]);
5674 eklass = mono_class_from_mono_type (fsig->ret);
/* FALSE -> skip the safety checks in emit_array_store. */
5677 return emit_array_store (cfg, eklass, args, FALSE);
5679 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Whether PARAM_KLASS can be reinterpreted as RETURN_KLASS by a plain
 * move (for the Array.UnsafeMov intrinsic): both must be valuetypes
 * without GC references, both structs or both primitives (no floats),
 * and of identical size.
 */
5686 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
/* Strip enums/gsharedvt wrappers down to the underlying type. */
5690 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5692 //Only allow for valuetypes
5693 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reinterpreting memory that holds GC references would break the GC. */
5697 if (param_klass->has_references || return_klass->has_references)
5700 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5701 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5702 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floats live in different registers; a plain move does not apply. */
5705 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5706 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5709 //And have the same size
5710 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Implement the Array.UnsafeMov intrinsic: reinterpret ARGS [0] as the
 * return type when the two types (or their element types, for rank-1
 * arrays) are layout-compatible per is_unsafe_mov_compatible ().
 */
5716 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5718 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5719 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5721 //Valuetypes that are semantically equivalent
5722 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5725 //Arrays of valuetypes that are semantically equivalent
5726 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit an intrinsic implementation of the constructor CMETHOD
 * (SIMD ctors when MONO_OPT_SIMD is enabled, then native-type ctors).
 * Returns NULL when no intrinsic applies.
 */
5733 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5735 #ifdef MONO_ARCH_SIMD_INTRINSICS
5736 MonoInst *ins = NULL;
5738 if (cfg->opt & MONO_OPT_SIMD) {
5739 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5745 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (a MONO_MEMORY_BARRIER_*
 * value) to the current basic block and return it.
 */
5749 emit_memory_barrier (MonoCompile *cfg, int kind)
5751 MonoInst *ins = NULL;
5752 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5753 MONO_ADD_INS (cfg->cbb, ins);
5754 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs(double) as float opcodes, and
 * Min/Max as conditional-move opcodes when MONO_OPT_CMOV is on.
 * Returns NULL when no intrinsic applies.
 */
5760 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5762 MonoInst *ins = NULL;
5765 /* The LLVM backend supports these intrinsics */
5766 if (cmethod->klass == mono_defaults.math_class) {
5767 if (strcmp (cmethod->name, "Sin") == 0) {
5769 } else if (strcmp (cmethod->name, "Cos") == 0) {
5771 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5773 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double -> double intrinsic. */
5777 if (opcode && fsig->param_count == 1) {
5778 MONO_INST_NEW (cfg, ins, opcode);
5779 ins->type = STACK_R8;
5780 ins->dreg = mono_alloc_freg (cfg);
5781 ins->sreg1 = args [0]->dreg;
5782 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to cmov-style opcodes, picked by operand type/signedness. */
5786 if (cfg->opt & MONO_OPT_CMOV) {
5787 if (strcmp (cmethod->name, "Min") == 0) {
5788 if (fsig->params [0]->type == MONO_TYPE_I4)
5790 if (fsig->params [0]->type == MONO_TYPE_U4)
5791 opcode = OP_IMIN_UN;
5792 else if (fsig->params [0]->type == MONO_TYPE_I8)
5794 else if (fsig->params [0]->type == MONO_TYPE_U8)
5795 opcode = OP_LMIN_UN;
5796 } else if (strcmp (cmethod->name, "Max") == 0) {
5797 if (fsig->params [0]->type == MONO_TYPE_I4)
5799 if (fsig->params [0]->type == MONO_TYPE_U4)
5800 opcode = OP_IMAX_UN;
5801 else if (fsig->params [0]->type == MONO_TYPE_I8)
5803 else if (fsig->params [0]->type == MONO_TYPE_U8)
5804 opcode = OP_LMAX_UN;
/* Binary integer intrinsic. */
5808 if (opcode && fsig->param_count == 2) {
5809 MONO_INST_NEW (cfg, ins, opcode);
5810 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5811 ins->dreg = mono_alloc_ireg (cfg);
5812 ins->sreg1 = args [0]->dreg;
5813 ins->sreg2 = args [1]->dreg;
5814 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use in shared (generic-sharing) code:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov internal methods.
 * Returns NULL when no intrinsic applies.
 */
5822 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5824 if (cmethod->klass == mono_defaults.array_class) {
5825 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5826 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5827 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5828 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5829 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5830 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS)
 * with an inlined intrinsic IR sequence. Returns the resulting
 * MonoInst*, or NULL when no intrinsic applies and a normal call must
 * be emitted. Dispatches on the declaring class: String, Object,
 * Array, RuntimeHelpers, Thread, Interlocked, Volatile, Debugger,
 * Environment, Assembly, Math and ObjC Selector, then falls back to
 * SIMD, native-type, LLVM-only and arch-specific intrinsics.
 */
5837 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5839 MonoInst *ins = NULL;
/* Cached lazily; the RuntimeHelpers class cannot change at runtime. */
5841 static MonoClass *runtime_helpers_class = NULL;
5842 if (! runtime_helpers_class)
5843 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5844 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* String intrinsics: get_Chars (bounds-checked char load) and get_Length. */
5846 if (cmethod->klass == mono_defaults.string_class) {
5847 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5848 int dreg = alloc_ireg (cfg);
5849 int index_reg = alloc_preg (cfg);
5850 int add_reg = alloc_preg (cfg);
5852 #if SIZEOF_REGISTER == 8
5853 /* The array reg is 64 bits but the index reg is only 32 */
5854 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5856 index_reg = args [1]->dreg;
5858 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5860 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + offset into a single LEA. */
5861 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5862 add_reg = ins->dreg;
5863 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5866 int mult_reg = alloc_preg (cfg);
5867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5868 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5869 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5870 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5872 type_from_op (cfg, ins, NULL, NULL);
5874 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5875 int dreg = alloc_ireg (cfg);
5876 /* Decompose later to allow more optimizations */
5877 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5878 ins->type = STACK_I4;
5879 ins->flags |= MONO_INST_FAULT;
5880 cfg->cbb->has_array_access = TRUE;
5881 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* Object intrinsics: GetType, InternalGetHashCode, no-op .ctor. */
5886 } else if (cmethod->klass == mono_defaults.object_class) {
5888 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5889 int dreg = alloc_ireg_ref (cfg);
5890 int vt_reg = alloc_preg (cfg);
5891 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5892 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5893 type_from_op (cfg, ins, NULL, NULL);
/* Only valid with a non-moving GC: hash derived from the object address. */
5896 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5897 int dreg = alloc_ireg (cfg);
5898 int t1 = alloc_ireg (cfg);
5900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5901 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5902 ins->type = STACK_I4;
/* Object..ctor () does nothing — emit a NOP. */
5905 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5906 MONO_INST_NEW (cfg, ins, OP_NOP);
5907 MONO_ADD_INS (cfg->cbb, ins);
/* Array intrinsics: generic value access, GetLength/GetLowerBound(0),
 * get_Rank and get_Length. */
5911 } else if (cmethod->klass == mono_defaults.array_class) {
5912 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5913 return emit_array_generic_access (cfg, fsig, args, FALSE);
5914 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5915 return emit_array_generic_access (cfg, fsig, args, TRUE);
5917 #ifndef MONO_BIG_ARRAYS
5919 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5922 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5923 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5924 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5925 int dreg = alloc_ireg (cfg);
5926 int bounds_reg = alloc_ireg_mp (cfg);
5927 MonoBasicBlock *end_bb, *szarray_bb;
5928 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5930 NEW_BBLOCK (cfg, end_bb);
5931 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means a szarray (single-dim, zero lower bound). */
5933 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5934 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5937 /* Non-szarray case */
5939 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5940 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5943 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5945 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is 0. */
5948 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5949 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5951 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5952 MONO_START_BB (cfg, end_bb);
5954 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5955 ins->type = STACK_I4;
/* Fast bail-out: all remaining Array intrinsics start with 'g'. */
5961 if (cmethod->name [0] != 'g')
5964 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5965 int dreg = alloc_ireg (cfg);
5966 int vtable_reg = alloc_preg (cfg);
5967 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5968 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5969 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5970 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5971 type_from_op (cfg, ins, NULL, NULL);
5974 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5975 int dreg = alloc_ireg (cfg);
5977 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5978 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5979 type_from_op (cfg, ins, NULL, NULL);
/* RuntimeHelpers.get_OffsetToStringData is a compile-time constant. */
5984 } else if (cmethod->klass == runtime_helpers_class) {
5986 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5987 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* Thread intrinsics: SpinWait_nop, MemoryBarrier, VolatileRead/Write. */
5991 } else if (cmethod->klass == mono_defaults.thread_class) {
5992 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5993 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5994 MONO_ADD_INS (cfg->cbb, ins);
5996 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5997 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* VolatileRead: plain load followed by an acquire barrier. */
5998 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6000 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6002 if (fsig->params [0]->type == MONO_TYPE_I1)
6003 opcode = OP_LOADI1_MEMBASE;
6004 else if (fsig->params [0]->type == MONO_TYPE_U1)
6005 opcode = OP_LOADU1_MEMBASE;
6006 else if (fsig->params [0]->type == MONO_TYPE_I2)
6007 opcode = OP_LOADI2_MEMBASE;
6008 else if (fsig->params [0]->type == MONO_TYPE_U2)
6009 opcode = OP_LOADU2_MEMBASE;
6010 else if (fsig->params [0]->type == MONO_TYPE_I4)
6011 opcode = OP_LOADI4_MEMBASE;
6012 else if (fsig->params [0]->type == MONO_TYPE_U4)
6013 opcode = OP_LOADU4_MEMBASE;
6014 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6015 opcode = OP_LOADI8_MEMBASE;
6016 else if (fsig->params [0]->type == MONO_TYPE_R4)
6017 opcode = OP_LOADR4_MEMBASE;
6018 else if (fsig->params [0]->type == MONO_TYPE_R8)
6019 opcode = OP_LOADR8_MEMBASE;
6020 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6021 opcode = OP_LOAD_MEMBASE;
6024 MONO_INST_NEW (cfg, ins, opcode);
6025 ins->inst_basereg = args [0]->dreg;
6026 ins->inst_offset = 0;
6027 MONO_ADD_INS (cfg->cbb, ins);
/* Pick the destination register class / stack type from the CIL type. */
6029 switch (fsig->params [0]->type) {
6036 ins->dreg = mono_alloc_ireg (cfg);
6037 ins->type = STACK_I4;
6041 ins->dreg = mono_alloc_lreg (cfg);
6042 ins->type = STACK_I8;
6046 ins->dreg = mono_alloc_ireg (cfg);
6047 #if SIZEOF_REGISTER == 8
6048 ins->type = STACK_I8;
6050 ins->type = STACK_I4;
6055 ins->dreg = mono_alloc_freg (cfg);
6056 ins->type = STACK_R8;
6059 g_assert (mini_type_is_reference (fsig->params [0]));
6060 ins->dreg = mono_alloc_ireg_ref (cfg);
6061 ins->type = STACK_OBJ;
/* 32-bit targets must decompose the 64-bit load. */
6065 if (opcode == OP_LOADI8_MEMBASE)
6066 ins = mono_decompose_opcode (cfg, ins);
6068 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* VolatileWrite: release barrier followed by a plain store. */
6072 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6074 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6076 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6077 opcode = OP_STOREI1_MEMBASE_REG;
6078 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6079 opcode = OP_STOREI2_MEMBASE_REG;
6080 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6081 opcode = OP_STOREI4_MEMBASE_REG;
6082 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6083 opcode = OP_STOREI8_MEMBASE_REG;
6084 else if (fsig->params [0]->type == MONO_TYPE_R4)
6085 opcode = OP_STORER4_MEMBASE_REG;
6086 else if (fsig->params [0]->type == MONO_TYPE_R8)
6087 opcode = OP_STORER8_MEMBASE_REG;
6088 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6089 opcode = OP_STORE_MEMBASE_REG;
6092 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6094 MONO_INST_NEW (cfg, ins, opcode);
6095 ins->sreg1 = args [1]->dreg;
6096 ins->inst_destbasereg = args [0]->dreg;
6097 ins->inst_offset = 0;
6098 MONO_ADD_INS (cfg->cbb, ins);
6100 if (opcode == OP_STOREI8_MEMBASE_REG)
6101 ins = mono_decompose_opcode (cfg, ins);
/* System.Threading.Interlocked intrinsics. */
6106 } else if (cmethod->klass->image == mono_defaults.corlib &&
6107 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6108 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6111 #if SIZEOF_REGISTER == 8
/* Interlocked.Read (int64&): atomic load where supported, otherwise a
 * plain load bracketed by full barriers (64-bit loads are atomic here). */
6112 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6113 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6114 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6115 ins->dreg = mono_alloc_preg (cfg);
6116 ins->sreg1 = args [0]->dreg;
6117 ins->type = STACK_I8;
6118 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6119 MONO_ADD_INS (cfg->cbb, ins);
6123 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6125 /* 64 bit reads are already atomic */
6126 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6127 load_ins->dreg = mono_alloc_preg (cfg);
6128 load_ins->inst_basereg = args [0]->dreg;
6129 load_ins->inst_offset = 0;
6130 load_ins->type = STACK_I8;
6131 MONO_ADD_INS (cfg->cbb, load_ins);
6133 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of the constant 1. */
6140 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6141 MonoInst *ins_iconst;
6144 if (fsig->params [0]->type == MONO_TYPE_I4) {
6145 opcode = OP_ATOMIC_ADD_I4;
6146 cfg->has_atomic_add_i4 = TRUE;
6148 #if SIZEOF_REGISTER == 8
6149 else if (fsig->params [0]->type == MONO_TYPE_I8)
6150 opcode = OP_ATOMIC_ADD_I8;
6153 if (!mono_arch_opcode_supported (opcode))
6155 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6156 ins_iconst->inst_c0 = 1;
6157 ins_iconst->dreg = mono_alloc_ireg (cfg);
6158 MONO_ADD_INS (cfg->cbb, ins_iconst);
6160 MONO_INST_NEW (cfg, ins, opcode);
6161 ins->dreg = mono_alloc_ireg (cfg);
6162 ins->inst_basereg = args [0]->dreg;
6163 ins->inst_offset = 0;
6164 ins->sreg2 = ins_iconst->dreg;
6165 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6166 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1. */
6168 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6169 MonoInst *ins_iconst;
6172 if (fsig->params [0]->type == MONO_TYPE_I4) {
6173 opcode = OP_ATOMIC_ADD_I4;
6174 cfg->has_atomic_add_i4 = TRUE;
6176 #if SIZEOF_REGISTER == 8
6177 else if (fsig->params [0]->type == MONO_TYPE_I8)
6178 opcode = OP_ATOMIC_ADD_I8;
6181 if (!mono_arch_opcode_supported (opcode))
6183 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6184 ins_iconst->inst_c0 = -1;
6185 ins_iconst->dreg = mono_alloc_ireg (cfg);
6186 MONO_ADD_INS (cfg->cbb, ins_iconst);
6188 MONO_INST_NEW (cfg, ins, opcode);
6189 ins->dreg = mono_alloc_ireg (cfg);
6190 ins->inst_basereg = args [0]->dreg;
6191 ins->inst_offset = 0;
6192 ins->sreg2 = ins_iconst->dreg;
6193 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6194 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the second argument. */
6196 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6199 if (fsig->params [0]->type == MONO_TYPE_I4) {
6200 opcode = OP_ATOMIC_ADD_I4;
6201 cfg->has_atomic_add_i4 = TRUE;
6203 #if SIZEOF_REGISTER == 8
6204 else if (fsig->params [0]->type == MONO_TYPE_I8)
6205 opcode = OP_ATOMIC_ADD_I8;
6208 if (!mono_arch_opcode_supported (opcode))
6210 MONO_INST_NEW (cfg, ins, opcode);
6211 ins->dreg = mono_alloc_ireg (cfg);
6212 ins->inst_basereg = args [0]->dreg;
6213 ins->inst_offset = 0;
6214 ins->sreg2 = args [1]->dreg;
6215 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6216 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic swap; floats are moved through integer registers. */
6219 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6220 MonoInst *f2i = NULL, *i2f;
6221 guint32 opcode, f2i_opcode, i2f_opcode;
6222 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6223 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6225 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6226 fsig->params [0]->type == MONO_TYPE_R4) {
6227 opcode = OP_ATOMIC_EXCHANGE_I4;
6228 f2i_opcode = OP_MOVE_F_TO_I4;
6229 i2f_opcode = OP_MOVE_I4_TO_F;
6230 cfg->has_atomic_exchange_i4 = TRUE;
6232 #if SIZEOF_REGISTER == 8
6234 fsig->params [0]->type == MONO_TYPE_I8 ||
6235 fsig->params [0]->type == MONO_TYPE_R8 ||
6236 fsig->params [0]->type == MONO_TYPE_I) {
6237 opcode = OP_ATOMIC_EXCHANGE_I8;
6238 f2i_opcode = OP_MOVE_F_TO_I8;
6239 i2f_opcode = OP_MOVE_I8_TO_F;
6242 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6243 opcode = OP_ATOMIC_EXCHANGE_I4;
6244 cfg->has_atomic_exchange_i4 = TRUE;
6250 if (!mono_arch_opcode_supported (opcode))
6254 /* TODO: Decompose these opcodes instead of bailing here. */
6255 if (COMPILE_SOFT_FLOAT (cfg))
/* Move the float new-value into an integer register for the swap. */
6258 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6259 f2i->dreg = mono_alloc_ireg (cfg);
6260 f2i->sreg1 = args [1]->dreg;
6261 if (f2i_opcode == OP_MOVE_F_TO_I4)
6262 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6263 MONO_ADD_INS (cfg->cbb, f2i);
6266 MONO_INST_NEW (cfg, ins, opcode);
6267 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6268 ins->inst_basereg = args [0]->dreg;
6269 ins->inst_offset = 0;
6270 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6271 MONO_ADD_INS (cfg->cbb, ins);
6273 switch (fsig->params [0]->type) {
6275 ins->type = STACK_I4;
6278 ins->type = STACK_I8;
6281 #if SIZEOF_REGISTER == 8
6282 ins->type = STACK_I8;
6284 ins->type = STACK_I4;
6289 ins->type = STACK_R8;
6292 g_assert (mini_type_is_reference (fsig->params [0]));
6293 ins->type = STACK_OBJ;
/* Move the old value back into a float register. */
6298 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6299 i2f->dreg = mono_alloc_freg (cfg);
6300 i2f->sreg1 = ins->dreg;
6301 i2f->type = STACK_R8;
6302 if (i2f_opcode == OP_MOVE_I4_TO_F)
6303 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6304 MONO_ADD_INS (cfg->cbb, i2f);
6309 if (cfg->gen_write_barriers && is_ref)
6310 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (3 args): atomic CAS; note the value type is params [1]. */
6312 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6313 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6314 guint32 opcode, f2i_opcode, i2f_opcode;
6315 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6316 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6318 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6319 fsig->params [1]->type == MONO_TYPE_R4) {
6320 opcode = OP_ATOMIC_CAS_I4;
6321 f2i_opcode = OP_MOVE_F_TO_I4;
6322 i2f_opcode = OP_MOVE_I4_TO_F;
6323 cfg->has_atomic_cas_i4 = TRUE;
6325 #if SIZEOF_REGISTER == 8
6327 fsig->params [1]->type == MONO_TYPE_I8 ||
6328 fsig->params [1]->type == MONO_TYPE_R8 ||
6329 fsig->params [1]->type == MONO_TYPE_I) {
6330 opcode = OP_ATOMIC_CAS_I8;
6331 f2i_opcode = OP_MOVE_F_TO_I8;
6332 i2f_opcode = OP_MOVE_I8_TO_F;
6335 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6336 opcode = OP_ATOMIC_CAS_I4;
6337 cfg->has_atomic_cas_i4 = TRUE;
6343 if (!mono_arch_opcode_supported (opcode))
6347 /* TODO: Decompose these opcodes instead of bailing here. */
6348 if (COMPILE_SOFT_FLOAT (cfg))
/* Move the float new-value and comparand into integer registers. */
6351 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6352 f2i_new->dreg = mono_alloc_ireg (cfg);
6353 f2i_new->sreg1 = args [1]->dreg;
6354 if (f2i_opcode == OP_MOVE_F_TO_I4)
6355 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6356 MONO_ADD_INS (cfg->cbb, f2i_new);
6358 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6359 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6360 f2i_cmp->sreg1 = args [2]->dreg;
6361 if (f2i_opcode == OP_MOVE_F_TO_I4)
6362 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6363 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6366 MONO_INST_NEW (cfg, ins, opcode);
6367 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6368 ins->sreg1 = args [0]->dreg;
6369 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6370 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6371 MONO_ADD_INS (cfg->cbb, ins);
6373 switch (fsig->params [1]->type) {
6375 ins->type = STACK_I4;
6378 ins->type = STACK_I8;
6381 #if SIZEOF_REGISTER == 8
6382 ins->type = STACK_I8;
6384 ins->type = STACK_I4;
6388 ins->type = cfg->r4_stack_type;
6391 ins->type = STACK_R8;
6394 g_assert (mini_type_is_reference (fsig->params [1]));
6395 ins->type = STACK_OBJ;
/* Move the old value back into a float register. */
6400 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6401 i2f->dreg = mono_alloc_freg (cfg);
6402 i2f->sreg1 = ins->dreg;
6403 i2f->type = STACK_R8;
6404 if (i2f_opcode == OP_MOVE_I4_TO_F)
6405 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6406 MONO_ADD_INS (cfg->cbb, i2f);
6411 if (cfg->gen_write_barriers && is_ref)
6412 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (4 args, int32): CAS plus an out 'success' flag. */
6414 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6415 fsig->params [1]->type == MONO_TYPE_I4) {
6416 MonoInst *cmp, *ceq;
6418 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6421 /* int32 r = CAS (location, value, comparand); */
6422 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6423 ins->dreg = alloc_ireg (cfg);
6424 ins->sreg1 = args [0]->dreg;
6425 ins->sreg2 = args [1]->dreg;
6426 ins->sreg3 = args [2]->dreg;
6427 ins->type = STACK_I4;
6428 MONO_ADD_INS (cfg->cbb, ins);
6430 /* bool result = r == comparand; */
6431 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6432 cmp->sreg1 = ins->dreg;
6433 cmp->sreg2 = args [2]->dreg;
6434 cmp->type = STACK_I4;
6435 MONO_ADD_INS (cfg->cbb, cmp);
6437 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6438 ceq->dreg = alloc_ireg (cfg);
6439 ceq->type = STACK_I4;
6440 MONO_ADD_INS (cfg->cbb, ceq);
6442 /* *success = result; */
6443 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6445 cfg->has_atomic_cas_i4 = TRUE;
6447 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6448 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* System.Threading.Volatile: acquire loads and release stores. */
6452 } else if (cmethod->klass->image == mono_defaults.corlib &&
6453 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6454 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6457 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6459 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6460 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6462 if (fsig->params [0]->type == MONO_TYPE_I1)
6463 opcode = OP_ATOMIC_LOAD_I1;
6464 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6465 opcode = OP_ATOMIC_LOAD_U1;
6466 else if (fsig->params [0]->type == MONO_TYPE_I2)
6467 opcode = OP_ATOMIC_LOAD_I2;
6468 else if (fsig->params [0]->type == MONO_TYPE_U2)
6469 opcode = OP_ATOMIC_LOAD_U2;
6470 else if (fsig->params [0]->type == MONO_TYPE_I4)
6471 opcode = OP_ATOMIC_LOAD_I4;
6472 else if (fsig->params [0]->type == MONO_TYPE_U4)
6473 opcode = OP_ATOMIC_LOAD_U4;
6474 else if (fsig->params [0]->type == MONO_TYPE_R4)
6475 opcode = OP_ATOMIC_LOAD_R4;
6476 else if (fsig->params [0]->type == MONO_TYPE_R8)
6477 opcode = OP_ATOMIC_LOAD_R8;
6478 #if SIZEOF_REGISTER == 8
6479 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6480 opcode = OP_ATOMIC_LOAD_I8;
6481 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6482 opcode = OP_ATOMIC_LOAD_U8;
6484 else if (fsig->params [0]->type == MONO_TYPE_I)
6485 opcode = OP_ATOMIC_LOAD_I4;
6486 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6487 opcode = OP_ATOMIC_LOAD_U4;
6491 if (!mono_arch_opcode_supported (opcode))
6494 MONO_INST_NEW (cfg, ins, opcode);
6495 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6496 ins->sreg1 = args [0]->dreg;
6497 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6498 MONO_ADD_INS (cfg->cbb, ins);
6500 switch (fsig->params [0]->type) {
6501 case MONO_TYPE_BOOLEAN:
6508 ins->type = STACK_I4;
6512 ins->type = STACK_I8;
6516 #if SIZEOF_REGISTER == 8
6517 ins->type = STACK_I8;
6519 ins->type = STACK_I4;
6523 ins->type = cfg->r4_stack_type;
6526 ins->type = STACK_R8;
6529 g_assert (mini_type_is_reference (fsig->params [0]));
6530 ins->type = STACK_OBJ;
/* Volatile.Write: release-ordered atomic store. */
6536 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6538 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6540 if (fsig->params [0]->type == MONO_TYPE_I1)
6541 opcode = OP_ATOMIC_STORE_I1;
6542 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6543 opcode = OP_ATOMIC_STORE_U1;
6544 else if (fsig->params [0]->type == MONO_TYPE_I2)
6545 opcode = OP_ATOMIC_STORE_I2;
6546 else if (fsig->params [0]->type == MONO_TYPE_U2)
6547 opcode = OP_ATOMIC_STORE_U2;
6548 else if (fsig->params [0]->type == MONO_TYPE_I4)
6549 opcode = OP_ATOMIC_STORE_I4;
6550 else if (fsig->params [0]->type == MONO_TYPE_U4)
6551 opcode = OP_ATOMIC_STORE_U4;
6552 else if (fsig->params [0]->type == MONO_TYPE_R4)
6553 opcode = OP_ATOMIC_STORE_R4;
6554 else if (fsig->params [0]->type == MONO_TYPE_R8)
6555 opcode = OP_ATOMIC_STORE_R8;
6556 #if SIZEOF_REGISTER == 8
6557 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6558 opcode = OP_ATOMIC_STORE_I8;
6559 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6560 opcode = OP_ATOMIC_STORE_U8;
6562 else if (fsig->params [0]->type == MONO_TYPE_I)
6563 opcode = OP_ATOMIC_STORE_I4;
6564 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6565 opcode = OP_ATOMIC_STORE_U4;
6569 if (!mono_arch_opcode_supported (opcode))
6572 MONO_INST_NEW (cfg, ins, opcode);
6573 ins->dreg = args [0]->dreg;
6574 ins->sreg1 = args [1]->dreg;
6575 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6576 MONO_ADD_INS (cfg->cbb, ins);
6578 if (cfg->gen_write_barriers && is_ref)
6579 emit_write_barrier (cfg, args [0], args [1]);
/* Debugger.Break (): obey the installed break policy. */
6585 } else if (cmethod->klass->image == mono_defaults.corlib &&
6586 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6587 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6588 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6589 if (should_insert_brekpoint (cfg->method)) {
6590 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6592 MONO_INST_NEW (cfg, ins, OP_NOP);
6593 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows: compile-time constant per target. */
6597 } else if (cmethod->klass->image == mono_defaults.corlib &&
6598 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6599 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6600 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6602 EMIT_NEW_ICONST (cfg, ins, 1);
6604 EMIT_NEW_ICONST (cfg, ins, 0);
/* Assembly.GetExecutingAssembly under llvm_only: no stack walks available. */
6607 } else if (cmethod->klass->image == mono_defaults.corlib &&
6608 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6609 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6610 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6611 /* No stack walks are current available, so implement this as an intrinsic */
6612 MonoInst *assembly_ins;
6614 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6615 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6618 } else if (cmethod->klass == mono_defaults.math_class) {
6620 * There is general branchless code for Min/Max, but it does not work for
6622 * http://everything2.com/?node_id=1051618
/* ObjCRuntime.Selector.GetHandle on a string literal: resolve the ObjC
 * selector at compile time (MonoMac/monotouch/Xamarin.iOS assemblies). */
6624 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6625 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6626 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6627 !strcmp (cmethod->klass->name, "Selector")) ||
6628 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6629 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6630 !strcmp (cmethod->klass->name, "Selector"))
6632 if (cfg->backend->have_objc_get_selector &&
6633 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6634 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6637 MonoJumpInfoToken *ji;
6640 cfg->disable_llvm = TRUE;
6642 if (args [0]->opcode == OP_GOT_ENTRY) {
6643 pi = args [0]->inst_p1;
6644 g_assert (pi->opcode == OP_PATCH_INFO);
6645 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6648 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6649 ji = args [0]->inst_p0;
/* The ldstr is consumed by the intrinsic; drop the original instruction. */
6652 NULLIFY_INS (args [0]);
6655 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6656 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6657 ins->dreg = mono_alloc_ireg (cfg);
6659 ins->inst_p0 = mono_string_to_utf8 (s);
6660 MONO_ADD_INS (cfg->cbb, ins);
/* Fallbacks: SIMD, native types, LLVM-only, then arch-specific intrinsics. */
6665 #ifdef MONO_ARCH_SIMD_INTRINSICS
6666 if (cfg->opt & MONO_OPT_SIMD) {
6667 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6673 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6677 if (COMPILE_LLVM (cfg)) {
6678 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6683 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect calls to certain well-known methods to hand-written IR.
 * Currently this only handles String.InternalAllocateStr: when allocation
 * profiling is off and MONO_OPT_SHARED is not in effect, the call is replaced
 * by a direct call to the managed GC string allocator (vtable + length args).
 * Returns the emitted call instruction, or falls through when no redirection
 * applies.
 */
6687 * This entry point could be used later for arbitrary method
6690 inline static MonoInst*
6691 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6692 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6694 if (method->klass == mono_defaults.string_class) {
6695 /* managed string allocation support */
6696 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6697 MonoInst *iargs [2];
6698 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6699 MonoMethod *managed_alloc = NULL;
6701 g_assert (vtable); /* Should not fail since it is System.String */
6702 #ifndef MONO_CROSS_COMPILE
/* Ask the GC for its managed (inline) allocator for String */
6703 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) */
6707 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6708 iargs [1] = args [0];
6709 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments SP into freshly created OP_LOCAL variables,
 * publishing them through cfg->args so the body of an inlined method can
 * load them like regular arguments. For instance methods the type of the
 * 'this' argument (i == 0) is derived from the stack entry itself.
 */
6716 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6718 MonoInst *store, *temp;
6721 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6722 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6725 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6726 * would be different than the MonoInst's used to represent arguments, and
6727 * the ldelema implementation can't deal with that.
6728 * Solution: When ldelema is used on an inline argument, create a var for
6729 * it, emit ldelema on that var, and emit the saving code below in
6730 * inline_method () if needed.
6732 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6733 cfg->args [i] = temp;
6734 /* This uses cfg->args [i] which is set by the preceding line */
6735 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6736 store->cil_code = sp [0]->cil_code;
6741 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6742 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid, compiled in only when MONO_INLINE_CALLED_LIMITED_METHODS
 * is set. Returns TRUE only when the callee's full name starts with the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable (prefix match via strncmp). The env var is read once and cached
 * in a function-local static.
 */
6744 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6746 check_inline_called_method_name_limit (MonoMethod *called_method)
6749 static const char *limit = NULL;
6751 if (limit == NULL) {
6752 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6754 if (limit_string != NULL)
6755 limit = limit_string;
/* Empty limit means "no restriction"; otherwise compare the prefix */
6760 if (limit [0] != '\0') {
6761 char *called_method_name = mono_method_full_name (called_method, TRUE);
6763 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6764 g_free (called_method_name);
6766 //return (strncmp_result <= 0);
6767 return (strncmp_result == 0);
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the CALLER:
 * only allow inlining into methods whose full name starts with the prefix in
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable. Compiled in
 * only when MONO_INLINE_CALLER_LIMITED_METHODS is set.
 */
6774 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6776 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6779 static const char *limit = NULL;
6781 if (limit == NULL) {
6782 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6783 if (limit_string != NULL) {
6784 limit = limit_string;
/* Empty limit means "no restriction"; otherwise compare the prefix */
6790 if (limit [0] != '\0') {
6791 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6793 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6794 g_free (caller_method_name);
6796 //return (strncmp_result <= 0);
6797 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG to the default (zero) value of type RTYPE:
 * NULL for references/pointers, 0 for integer types, 0.0 for R4/R8 (via the
 * addressable static zero constants below, since R4CONST/R8CONST take a
 * pointer operand), VZERO for valuetypes, and PCONST NULL as fallback.
 */
6805 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Addressable zero constants for the OP_R8CONST/OP_R4CONST inst_p0 operand */
6807 static double r8_0 = 0.0;
6808 static float r4_0 = 0.0;
6812 rtype = mini_get_underlying_type (rtype);
6816 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6817 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6818 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6819 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6820 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6821 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision (STACK_R4) */
6822 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6823 ins->type = STACK_R4;
6824 ins->inst_p0 = (void*)&r4_0;
6826 MONO_ADD_INS (cfg->cbb, ins);
6827 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6828 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6829 ins->type = STACK_R8;
6830 ins->inst_p0 = (void*)&r8_0;
6832 MONO_ADD_INS (cfg->cbb, ins);
6833 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6834 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6835 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6836 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Gshared type variables known to be valuetypes are zeroed like vtypes */
6837 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6839 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder initializations,
 * which keep the IR well-formed (the vreg has a def) without generating real
 * machine code. Types with no dummy opcode fall back to emit_init_rvar ().
 */
6844 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6848 rtype = mini_get_underlying_type (rtype);
6852 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6853 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6854 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6855 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6856 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6857 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6858 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6859 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6860 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6861 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6862 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6863 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6864 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6865 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization */
6867 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. When INIT is TRUE a real
 * zero-initialization is emitted; when FALSE, dummy init statements keep the
 * IR valid without generating code. Under soft-float the value is built in a
 * scratch vreg and then stored into the local with a LOCSTORE.
 */
6871 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6873 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6875 MonoInst *var = cfg->locals [local];
6876 if (COMPILE_SOFT_FLOAT (cfg)) {
6878 int reg = alloc_dreg (cfg, var->type);
6879 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local */
6880 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6883 emit_init_rvar (cfg, var->dreg, type);
6885 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. The caller's
 * per-method state in CFG (locals, args, cil offsets, cbb, generic context,
 * current_method, ...) is saved, replaced with the callee's, and restored
 * after recursing into mono_method_to_ir (). On success the callee's bblocks
 * are linked/merged into the caller's CFG and the return value (if any) is
 * made available through a temporary; on failure the newly added bblocks are
 * discarded. Returns the inlining cost reported by mono_method_to_ir ().
 */
6892 * Return the cost of inlining CMETHOD.
6895 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6896 guchar *ip, guint real_offset, gboolean inline_always)
6898 MonoInst *ins, *rvar = NULL;
6899 MonoMethodHeader *cheader;
6900 MonoBasicBlock *ebblock, *sbblock;
6902 MonoMethod *prev_inlined_method;
6903 MonoInst **prev_locals, **prev_args;
6904 MonoType **prev_arg_types;
6905 guint prev_real_offset;
6906 GHashTable *prev_cbb_hash;
6907 MonoBasicBlock **prev_cil_offset_to_bb;
6908 MonoBasicBlock *prev_cbb;
6909 unsigned char* prev_cil_start;
6910 guint32 prev_cil_offset_to_bb_len;
6911 MonoMethod *prev_current_method;
6912 MonoGenericContext *prev_generic_context;
6913 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6915 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging inlining problems */
6917 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6918 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6921 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6922 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6927 fsig = mono_method_signature (cmethod);
6929 if (cfg->verbose_level > 2)
6930 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6932 if (!cmethod->inline_info) {
6933 cfg->stat_inlineable_methods++;
6934 cmethod->inline_info = 1;
6937 /* allocate local variables */
6938 cheader = mono_method_get_header (cmethod);
6940 if (cheader == NULL || mono_loader_get_last_error ()) {
6941 MonoLoaderError *error = mono_loader_get_last_error ();
6944 mono_metadata_free_mh (cheader);
6945 if (inline_always && error)
6946 mono_cfg_set_exception (cfg, error->exception_type);
6948 mono_loader_clear_error ();
6952 /*Must verify before creating locals as it can cause the JIT to assert.*/
6953 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6954 mono_metadata_free_mh (cheader);
6958 /* allocate space to store the return value */
6959 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6960 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals */
6963 prev_locals = cfg->locals;
6964 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6965 for (i = 0; i < cheader->num_locals; ++i)
6966 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6968 /* allocate start and end blocks */
6969 /* This is needed so if the inline is aborted, we can clean up */
6970 NEW_BBLOCK (cfg, sbblock);
6971 sbblock->real_offset = real_offset;
6973 NEW_BBLOCK (cfg, ebblock);
6974 ebblock->block_num = cfg->num_bblocks++;
6975 ebblock->real_offset = real_offset;
/* Save the caller state that mono_method_to_ir () will overwrite */
6977 prev_args = cfg->args;
6978 prev_arg_types = cfg->arg_types;
6979 prev_inlined_method = cfg->inlined_method;
6980 cfg->inlined_method = cmethod;
6981 cfg->ret_var_set = FALSE;
6982 cfg->inline_depth ++;
6983 prev_real_offset = cfg->real_offset;
6984 prev_cbb_hash = cfg->cbb_hash;
6985 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6986 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6987 prev_cil_start = cfg->cil_start;
6988 prev_cbb = cfg->cbb;
6989 prev_current_method = cfg->current_method;
6990 prev_generic_context = cfg->generic_context;
6991 prev_ret_var_set = cfg->ret_var_set;
6992 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method needs a virtual-call-style entry */
6994 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6997 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6999 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state */
7001 cfg->inlined_method = prev_inlined_method;
7002 cfg->real_offset = prev_real_offset;
7003 cfg->cbb_hash = prev_cbb_hash;
7004 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7005 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7006 cfg->cil_start = prev_cil_start;
7007 cfg->locals = prev_locals;
7008 cfg->args = prev_args;
7009 cfg->arg_types = prev_arg_types;
7010 cfg->current_method = prev_current_method;
7011 cfg->generic_context = prev_generic_context;
7012 cfg->ret_var_set = prev_ret_var_set;
7013 cfg->disable_inline = prev_disable_inline;
7014 cfg->inline_depth --;
/* Accept if cheap enough, forced, or marked AggressiveInlining */
7016 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7017 if (cfg->verbose_level > 2)
7018 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7020 cfg->stat_inlined_methods++;
7022 /* always add some code to avoid block split failures */
7023 MONO_INST_NEW (cfg, ins, OP_NOP);
7024 MONO_ADD_INS (prev_cbb, ins);
7026 prev_cbb->next_bb = sbblock;
7027 link_bblock (cfg, prev_cbb, sbblock);
7030 * Get rid of the begin and end bblocks if possible to aid local
7033 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7035 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7036 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7038 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7039 MonoBasicBlock *prev = ebblock->in_bb [0];
7040 mono_merge_basic_blocks (cfg, prev, ebblock);
7042 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7043 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7044 cfg->cbb = prev_cbb;
7048 * It's possible that the rvar is set in some prev bblock, but not in others.
7054 for (i = 0; i < ebblock->in_count; ++i) {
7055 bb = ebblock->in_bb [i];
7057 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
/* Unreachable predecessor: give rvar a dummy definition anyway */
7060 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7070 * If the inlined method contains only a throw, then the ret var is not
7071 * set, so set it to a dummy value.
7074 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Expose the callee's return value to the caller's stack */
7076 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7079 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7082 if (cfg->verbose_level > 2)
7083 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7084 cfg->exception_type = MONO_EXCEPTION_NONE;
7085 mono_loader_clear_error ();
7087 /* This gets rid of the newly added bblocks */
7088 cfg->cbb = prev_cbb;
7090 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7095 * Some of these comments may well be out-of-date.
7096 * Design decisions: we do a single pass over the IL code (and we do bblock
7097 * splitting/merging in the few cases when it's required: a back jump to an IL
7098 * address that was not already seen as bblock starting point).
7099 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7100 * Complex operations are decomposed in simpler ones right away. We need to let the
7101 * arch-specific code peek and poke inside this process somehow (except when the
7102 * optimizations can take advantage of the full semantic info of coarse opcodes).
7103 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7104 * MonoInst->opcode initially is the IL opcode or some simplification of that
7105 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7106 * opcode with value bigger than OP_LAST.
7107 * At this point the IR can be handed over to an interpreter, a dumb code generator
7108 * or to the optimizing code generator that will translate it to SSA form.
7110 * Profiling directed optimizations.
7111 * We may compile by default with few or no optimizations and instrument the code
7112 * or the user may indicate what methods to optimize the most either in a config file
7113 * or through repeated runs where the compiler applies offline the optimizations to
7114 * each method and then decides if it was worth it.
/*
 * Validation helpers used inside mono_method_to_ir (). They rely on locals
 * of that function being in scope (sp, stack_start, header, num_args, ip,
 * end) and bail out through the UNVERIFIED / TYPE_LOAD_ERROR macros.
 */
7117 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7118 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7119 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7120 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7121 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7122 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7123 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7124 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7126 /* offset from br.s -> br like opcodes */
7127 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other basic block starts at that offset in cfg->cil_offset_to_bb.
 */
7130 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7132 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7134 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [start, end) once, decoding each opcode's argument
 * kind to find branch targets, and create/register a basic block (via
 * GET_BBLOCK) at every branch target and fall-through point. Basic blocks
 * whose last reached instruction is a throw are marked out_of_line so later
 * passes can move them out of the hot path.
 */
7138 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7140 unsigned char *ip = start;
7141 unsigned char *target;
7144 MonoBasicBlock *bblock;
7145 const MonoOpcode *opcode;
7148 cli_addr = ip - start;
7149 i = mono_opcode_value ((const guint8 **)&ip, end);
7152 opcode = &mono_opcodes [i];
/* Advance ip according to the size of the opcode's inline argument */
7153 switch (opcode->argument) {
7154 case MonoInlineNone:
7157 case MonoInlineString:
7158 case MonoInlineType:
7159 case MonoInlineField:
7160 case MonoInlineMethod:
7163 case MonoShortInlineR:
7170 case MonoShortInlineVar:
7171 case MonoShortInlineI:
7174 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
7175 target = start + cli_addr + 2 + (signed char)ip [1];
7176 GET_BBLOCK (cfg, bblock, target);
7179 GET_BBLOCK (cfg, bblock, ip);
7181 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
7182 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7183 GET_BBLOCK (cfg, bblock, target);
7186 GET_BBLOCK (cfg, bblock, ip);
7188 case MonoInlineSwitch: {
7189 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the n-entry jump table */
7192 cli_addr += 5 + 4 * n;
7193 target = start + cli_addr;
7194 GET_BBLOCK (cfg, bblock, target);
7196 for (j = 0; j < n; ++j) {
7197 target = start + cli_addr + (gint32)read32 (ip);
7198 GET_BBLOCK (cfg, bblock, target);
7208 g_assert_not_reached ();
7211 if (i == CEE_THROW) {
7212 unsigned char *bb_start = ip - 1;
7214 /* Find the start of the bblock containing the throw */
7216 while ((bb_start >= start) && !bblock) {
7217 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are cold: schedule them out of line */
7221 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing open
 * constructed types in the result. For wrapper methods the token indexes the
 * wrapper's own data (and the result is inflated against CONTEXT); otherwise
 * it is a regular metadata token resolved from M's image.
 */
7231 static inline MonoMethod *
7232 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7236 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7237 method = mono_method_get_wrapper_data (m, token);
7240 method = mono_class_inflate_generic_method_checked (method, context, &error);
7241 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7244 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (the result must
 * be fully instantiated to be compiled directly).
 */
7250 static inline MonoMethod *
7251 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7253 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7255 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating against
 * CONTEXT: wrappers store the class in their wrapper data, regular methods
 * resolve the typedef/typeref/typespec token from their image. The class is
 * initialized before being returned.
 */
7261 static inline MonoClass*
7262 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7267 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7268 klass = mono_method_get_wrapper_data (method, token);
7270 klass = mono_class_inflate_generic_class (klass, context);
7272 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7273 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7276 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature for METHOD (wrapper data or parsed
 * from metadata), inflating it with CONTEXT when needed.
 */
7280 static inline MonoMethodSignature*
7281 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7283 MonoMethodSignature *fsig;
7285 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7286 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7288 fsig = mono_metadata_parse_signature (method->klass->image, token);
7292 fsig = mono_inflate_generic_signature(fsig, context, &error);
7294 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a static) the managed
 * SecurityManager.ThrowException (exception) helper method.
 */
7300 throw_exception (void)
7302 static MonoMethod *method = NULL;
7305 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7306 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () passing the (pinned)
 * exception object EX as a pointer constant, so executing the compiled code
 * raises the pre-built exception.
 */
7313 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7315 MonoMethod *thrower = throw_exception ();
7318 EMIT_NEW_PCONST (cfg, args [0], ex);
7319 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Unwrap METHOD when it is a wrapper, since custom attributes (needed for
 * CoreCLR security checks) are only present on the original method.
 * Native-to-managed wrappers are returned as-is.
 */
7323 * Return the original method if a wrapper is specified. We can only access
7324 * the custom attributes from the original method.
7327 get_original_method (MonoMethod *method)
7329 if (method->wrapper_type == MONO_WRAPPER_NONE)
7332 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7333 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7336 /* in other cases we need to find the original method */
7337 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not access FIELD,
 * emit code that throws the corresponding security exception at runtime.
 */
7341 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7343 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7344 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7346 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not call CALLEE,
 * emit code that throws the corresponding security exception at runtime.
 */
7350 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7352 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7353 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7355 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" following a newarr, and, when it matches
 * and the element type is byte-order safe, return a pointer to the static
 * field's raw initializer data (or its RVA when compiling AOT), along with
 * its size and field token through the out parameters.
 */
7359 * Check that the IL instructions at ip are the array initialization
7360 * sequence and return the pointer to the data and the size.
7363 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7366 * newarr[System.Int32]
7368 * ldtoken field valuetype ...
7369 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7371 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7373 guint32 token = read32 (ip + 7);
7374 guint32 field_token = read32 (ip + 2);
7375 guint32 field_index = field_token & 0xffffff;
7377 const char *data_ptr;
7379 MonoMethod *cmethod;
7380 MonoClass *dummy_class;
7381 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7385 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7389 *out_field_token = field_token;
7391 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies */
7394 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7396 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7397 case MONO_TYPE_BOOLEAN:
7401 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7402 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7403 case MONO_TYPE_CHAR:
/* The array data must fit inside the field's declared size */
7420 if (size > mono_type_size (field->type, &dummy_align))
7423 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7424 if (!image_is_dynamic (method->klass->image)) {
7425 field_index = read32 (ip + 2) & 0xffffff;
7426 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7427 data_ptr = mono_image_rva_map (method->klass->image, rva);
7428 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7429 /* for aot code we do the lookup on load */
7430 if (aot && data_ptr)
7431 return GUINT_TO_POINTER (rva);
7433 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7435 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a diagnostic message that
 * names METHOD and disassembles the offending instruction at IP (or notes an
 * empty method body). The header is queued for later freeing.
 */
7443 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7445 char *method_fname = mono_method_full_name (method, TRUE);
7447 MonoMethodHeader *header = mono_method_get_header (method);
7449 if (header->code_size == 0)
7450 method_code = g_strdup ("method body is empty.");
7452 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7453 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7454 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7455 g_free (method_fname);
7456 g_free (method_code);
7457 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Fail the compilation with a pre-built exception object; the object is
 * registered as a GC root so it survives until the failure is reported.
 */
7461 set_exception_object (MonoCompile *cfg, MonoException *exception)
7463 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7464 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7465 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N. When the value
 * is a constant just emitted into the current bblock, the store is folded
 * away by retargeting the constant's dreg directly at the local's vreg.
 */
7469 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7472 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7473 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7474 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7475 /* Optimize reg-reg moves away */
7477 * Can't optimize other opcodes, since sp[0] might point to
7478 * the last ins of a decomposed opcode.
7480 sp [0]->dreg = (cfg)->locals [n]->dreg;
7482 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>" within the same basic block:
 * instead of taking the local's address (which inhibits many optimizations),
 * directly emit the local's zero-initialization and skip past the pattern.
 * Returns the updated IL pointer, or falls through when the pattern does not
 * match.
 */
7487 * ldloca inhibits many optimizations so try to get rid of it in common
7490 static inline unsigned char *
7491 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7501 local = read16 (ip + 2);
7505 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7506 /* From the INITOBJ case */
7507 token = read32 (ip + 2);
7508 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7509 CHECK_TYPELOAD (klass);
7510 type = mini_get_underlying_type (&klass->byval_arg);
7511 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_runtime_constant:
 *
 *   Load a runtime constant identified by PATCH_TYPE into *INS: as an AOT
 * patch-site constant when compiling AOT, otherwise resolved immediately to
 * a pointer constant via mono_resolve_patch_target ().
 */
7519 emit_runtime_constant (MonoCompile *cfg, MonoInst **ins, MonoJumpInfoType patch_type)
7521 if (cfg->compile_aot) {
7522 EMIT_NEW_AOTCONST (cfg, *ins, patch_type, NULL);
7527 ji.type = patch_type;
7528 target = mono_resolve_patch_target (NULL, NULL, NULL, &ji, FALSE);
7530 EMIT_NEW_PCONST (cfg, *ins, target);
/*
 * is_exception_class:
 *
 *   Walk KLASS's ancestor chain and return whether it derives from (or is)
 * System.Exception.
 */
7535 is_exception_class (MonoClass *klass)
7538 if (klass == mono_defaults.exception_class)
7540 klass = klass->parent;
/*
 * is_jit_optimizer_disabled:
 *
 *   Inspect M's assembly for a [Debuggable (bool, bool)] attribute and cache
 * the result on the assembly (jit_optimizer_disabled + _inited, published
 * with memory barriers so concurrent JIT threads see a consistent pair).
 */
7546 * is_jit_optimizer_disabled:
7548 * Determine whether M's assembly has a DebuggableAttribute with the
7549 * IsJITOptimizerDisabled flag set.
7552 is_jit_optimizer_disabled (MonoMethod *m)
7554 MonoAssembly *ass = m->klass->image->assembly;
7555 MonoCustomAttrInfo* attrs;
7556 static MonoClass *klass;
7558 gboolean val = FALSE;
7561 if (ass->jit_optimizer_disabled_inited)
7562 return ass->jit_optimizer_disabled;
7565 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class: record "not disabled" and publish */
7568 ass->jit_optimizer_disabled = FALSE;
7569 mono_memory_barrier ();
7570 ass->jit_optimizer_disabled_inited = TRUE;
7574 attrs = mono_custom_attrs_from_assembly (ass);
7576 for (i = 0; i < attrs->num_attrs; ++i) {
7577 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7579 MonoMethodSignature *sig;
7581 if (!attr->ctor || attr->ctor->klass != klass)
7583 /* Decode the attribute. See reflection.c */
7584 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7585 g_assert (read16 (p) == 0x0001);
7588 // FIXME: Support named parameters
7589 sig = mono_method_signature (attr->ctor);
7590 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7592 /* Two boolean arguments */
7596 mono_custom_attrs_free (attrs);
/* Publish the cached result; barrier orders value before the inited flag */
7599 ass->jit_optimizer_disabled = val;
7600 mono_memory_barrier ();
7601 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call: the architecture must support the signature pair,
 * and no argument may reference the current method's stack (byref/pointer/
 * fnptr parameters, or a valuetype 'this'); pinvokes, LMF-saving methods,
 * most wrappers and non-CALL opcodes are also excluded.
 */
7607 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7609 gboolean supported_tail_call;
7612 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7614 for (i = 0; i < fsig->param_count; ++i) {
7615 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7616 /* These can point to the current method's stack */
7617 supported_tail_call = FALSE;
7619 if (fsig->hasthis && cmethod->klass->valuetype)
7620 /* this might point to the current method's stack */
7621 supported_tail_call = FALSE;
7622 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7623 supported_tail_call = FALSE;
7624 if (cfg->method->save_lmf)
7625 supported_tail_call = FALSE;
7626 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7627 supported_tail_call = FALSE;
7628 if (call_opcode != CEE_CALL)
7629 supported_tail_call = FALSE;
7631 /* Debugging support */
7633 if (supported_tail_call) {
/* mono_debug_count () lets tail calls be bisected via MONO_DEBUG_COUNT */
7634 if (!mono_debug_count ())
7635 supported_tail_call = FALSE;
7639 return supported_tail_call;
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: picks between a ctor
 * intrinsic, inlining the ctor body, a gsharedvt indirect call, an rgctx
 * indirect call for unshareable generic calls, or a plain direct call.
 * Generic-shared valuetype ctors additionally receive an mrgctx/vtable as a
 * hidden argument (vtable_arg).
 */
7645 * Handle calls made to ctors from NEWOBJ opcodes.
7648 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7649 MonoInst **sp, guint8 *ip, int *inline_costs)
7651 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7653 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7654 mono_method_is_generic_sharable (cmethod, TRUE)) {
7655 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Method-instantiated: pass the method rgctx as the hidden arg */
7656 mono_class_vtable (cfg->domain, cmethod->klass);
7657 CHECK_TYPELOAD (cmethod->klass);
7659 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7660 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7663 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7664 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7666 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7668 CHECK_TYPELOAD (cmethod->klass);
7669 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7674 /* Avoid virtual calls to ctors if possible */
7675 if (mono_class_is_marshalbyref (cmethod->klass))
7676 callvirt_this_arg = sp [0];
7678 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7679 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7680 CHECK_CFG_EXCEPTION;
7681 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7682 mono_method_check_inlining (cfg, cmethod) &&
7683 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7686 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* Account for the 5-byte call instruction the inline replaced */
7687 cfg->real_offset += 5;
7689 *inline_costs += costs - 5;
7691 INLINE_FAILURE ("inline failure");
7692 // FIXME-VT: Clean this up
7693 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7694 GSHAREDVT_FAILURE(*ip);
7695 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7697 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* Gsharedvt signature: call through the out-trampoline address */
7700 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7701 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7702 } else if (context_used &&
7703 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7704 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7705 MonoInst *cmethod_addr;
7707 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7709 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7710 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7712 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7714 INLINE_FAILURE ("ctor call");
7715 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7716 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the "set return value" part of a CEE_RET: valuetype returns are
 * stored either into the return variable or through the hidden vret address;
 * under the soft-float fallback an R4 return is converted through an icall
 * before mono_arch_emit_setret (); everything else goes to the
 * arch-specific setret directly.
 */
7723 emit_setret (MonoCompile *cfg, MonoInst *val)
7725 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7728 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7731 if (!cfg->vret_addr) {
7732 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it */
7734 EMIT_NEW_RETLOADA (cfg, ret_addr);
7736 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7737 ins->klass = mono_class_from_mono_type (ret_type);
7740 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7741 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7742 MonoInst *iargs [1];
7746 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7747 mono_arch_emit_setret (cfg, cfg->method, conv);
7749 mono_arch_emit_setret (cfg, cfg->method, val);
7752 mono_arch_emit_setret (cfg, cfg->method, val);
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG with one extra trailing parameter of native-int
 * type appended — presumably the rgctx argument, judging by the name
 * (TODO confirm against callers; not visible in this excerpt).
 * The copy is g_malloc ()-ed; per the FIXME below its lifetime/ownership
 * is unresolved, so it is effectively leaked.
 * NOTE(review): this excerpt is elided ('int i;', 'return res;' and the
 * closing brace are missing from the listing).
 */
7757 static MonoMethodSignature*
7758 sig_to_rgctx_sig (MonoMethodSignature *sig)
7760 	// FIXME: memory allocation
7761 	MonoMethodSignature *res;
	/* Room for the signature header plus the original params and one extra slot */
7764 	res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7765 	memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7766 	res->param_count = sig->param_count + 1;
7767 	for (i = 0; i < sig->param_count; ++i)
7768 		res->params [i] = sig->params [i];
	/* The appended argument is pointer-sized (native int) */
7769 	res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7774 * mono_method_to_ir:
7776 * Translate the .net IL into linear IR.
7779 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7780 MonoInst *return_var, MonoInst **inline_args,
7781 guint inline_offset, gboolean is_virtual_call)
7784 MonoInst *ins, **sp, **stack_start;
7785 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7786 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7787 MonoMethod *cmethod, *method_definition;
7788 MonoInst **arg_array;
7789 MonoMethodHeader *header;
7791 guint32 token, ins_flag;
7793 MonoClass *constrained_class = NULL;
7794 unsigned char *ip, *end, *target, *err_pos;
7795 MonoMethodSignature *sig;
7796 MonoGenericContext *generic_context = NULL;
7797 MonoGenericContainer *generic_container = NULL;
7798 MonoType **param_types;
7799 int i, n, start_new_bblock, dreg;
7800 int num_calls = 0, inline_costs = 0;
7801 int breakpoint_id = 0;
7803 GSList *class_inits = NULL;
7804 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7806 gboolean init_locals, seq_points, skip_dead_blocks;
7807 gboolean sym_seq_points = FALSE;
7808 MonoDebugMethodInfo *minfo;
7809 MonoBitSet *seq_point_locs = NULL;
7810 MonoBitSet *seq_point_set_locs = NULL;
7812 cfg->disable_inline = is_jit_optimizer_disabled (method);
7814 /* serialization and xdomain stuff may need access to private fields and methods */
7815 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7816 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7817 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7818 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7819 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7820 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7822 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7823 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7824 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7825 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7826 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7828 image = method->klass->image;
7829 header = mono_method_get_header (method);
7831 MonoLoaderError *error;
7833 if ((error = mono_loader_get_last_error ())) {
7834 mono_cfg_set_exception (cfg, error->exception_type);
7836 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7837 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7839 goto exception_exit;
7841 generic_container = mono_method_get_generic_container (method);
7842 sig = mono_method_signature (method);
7843 num_args = sig->hasthis + sig->param_count;
7844 ip = (unsigned char*)header->code;
7845 cfg->cil_start = ip;
7846 end = ip + header->code_size;
7847 cfg->stat_cil_code_size += header->code_size;
7849 seq_points = cfg->gen_seq_points && cfg->method == method;
7851 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7852 /* We could hit a seq point before attaching to the JIT (#8338) */
7856 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7857 minfo = mono_debug_lookup_method (method);
7859 MonoSymSeqPoint *sps;
7860 int i, n_il_offsets;
7862 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7863 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7864 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7865 sym_seq_points = TRUE;
7866 for (i = 0; i < n_il_offsets; ++i) {
7867 if (sps [i].il_offset < header->code_size)
7868 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7871 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7872 /* Methods without line number info like auto-generated property accessors */
7873 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7874 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7875 sym_seq_points = TRUE;
7880 * Methods without init_locals set could cause asserts in various passes
7881 * (#497220). To work around this, we emit dummy initialization opcodes
7882 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7883 * on some platforms.
7885 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7886 init_locals = header->init_locals;
7890 method_definition = method;
7891 while (method_definition->is_inflated) {
7892 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7893 method_definition = imethod->declaring;
7896 /* SkipVerification is not allowed if core-clr is enabled */
7897 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7899 dont_verify_stloc = TRUE;
7902 if (sig->is_inflated)
7903 generic_context = mono_method_get_context (method);
7904 else if (generic_container)
7905 generic_context = &generic_container->context;
7906 cfg->generic_context = generic_context;
7909 g_assert (!sig->has_type_parameters);
7911 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7912 g_assert (method->is_inflated);
7913 g_assert (mono_method_get_context (method)->method_inst);
7915 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7916 g_assert (sig->generic_param_count);
7918 if (cfg->method == method) {
7919 cfg->real_offset = 0;
7921 cfg->real_offset = inline_offset;
7924 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7925 cfg->cil_offset_to_bb_len = header->code_size;
7927 cfg->current_method = method;
7929 if (cfg->verbose_level > 2)
7930 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7932 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7934 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7935 for (n = 0; n < sig->param_count; ++n)
7936 param_types [n + sig->hasthis] = sig->params [n];
7937 cfg->arg_types = param_types;
7939 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7940 if (cfg->method == method) {
7942 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7943 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7946 NEW_BBLOCK (cfg, start_bblock);
7947 cfg->bb_entry = start_bblock;
7948 start_bblock->cil_code = NULL;
7949 start_bblock->cil_length = 0;
7952 NEW_BBLOCK (cfg, end_bblock);
7953 cfg->bb_exit = end_bblock;
7954 end_bblock->cil_code = NULL;
7955 end_bblock->cil_length = 0;
7956 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7957 g_assert (cfg->num_bblocks == 2);
7959 arg_array = cfg->args;
7961 if (header->num_clauses) {
7962 cfg->spvars = g_hash_table_new (NULL, NULL);
7963 cfg->exvars = g_hash_table_new (NULL, NULL);
7965 /* handle exception clauses */
7966 for (i = 0; i < header->num_clauses; ++i) {
7967 MonoBasicBlock *try_bb;
7968 MonoExceptionClause *clause = &header->clauses [i];
7969 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7971 try_bb->real_offset = clause->try_offset;
7972 try_bb->try_start = TRUE;
7973 try_bb->region = ((i + 1) << 8) | clause->flags;
7974 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7975 tblock->real_offset = clause->handler_offset;
7976 tblock->flags |= BB_EXCEPTION_HANDLER;
7979 * Linking the try block with the EH block hinders inlining as we won't be able to
7980 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7982 if (COMPILE_LLVM (cfg))
7983 link_bblock (cfg, try_bb, tblock);
7985 if (*(ip + clause->handler_offset) == CEE_POP)
7986 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7988 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7989 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7990 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7991 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7992 MONO_ADD_INS (tblock, ins);
7994 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7995 /* finally clauses already have a seq point */
7996 /* seq points for filter clauses are emitted below */
7997 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7998 MONO_ADD_INS (tblock, ins);
8001 /* todo: is a fault block unsafe to optimize? */
8002 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8003 tblock->flags |= BB_EXCEPTION_UNSAFE;
8006 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8008 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8010 /* catch and filter blocks get the exception object on the stack */
8011 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8012 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8014 /* mostly like handle_stack_args (), but just sets the input args */
8015 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8016 tblock->in_scount = 1;
8017 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8018 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8022 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8023 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8024 if (!cfg->compile_llvm) {
8025 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8026 ins->dreg = tblock->in_stack [0]->dreg;
8027 MONO_ADD_INS (tblock, ins);
8030 MonoInst *dummy_use;
8033 * Add a dummy use for the exvar so its liveness info will be
8036 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8039 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8040 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8041 MONO_ADD_INS (tblock, ins);
8044 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8045 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8046 tblock->flags |= BB_EXCEPTION_HANDLER;
8047 tblock->real_offset = clause->data.filter_offset;
8048 tblock->in_scount = 1;
8049 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8050 /* The filter block shares the exvar with the handler block */
8051 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8052 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8053 MONO_ADD_INS (tblock, ins);
8057 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8058 clause->data.catch_class &&
8060 mono_class_check_context_used (clause->data.catch_class)) {
8062 * In shared generic code with catch
8063 * clauses containing type variables
8064 * the exception handling code has to
8065 * be able to get to the rgctx.
8066 * Therefore we have to make sure that
8067 * the vtable/mrgctx argument (for
8068 * static or generic methods) or the
8069 * "this" argument (for non-static
8070 * methods) are live.
8072 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8073 mini_method_get_context (method)->method_inst ||
8074 method->klass->valuetype) {
8075 mono_get_vtable_var (cfg);
8077 MonoInst *dummy_use;
8079 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8084 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8085 cfg->cbb = start_bblock;
8086 cfg->args = arg_array;
8087 mono_save_args (cfg, sig, inline_args);
8090 /* FIRST CODE BLOCK */
8091 NEW_BBLOCK (cfg, tblock);
8092 tblock->cil_code = ip;
8096 ADD_BBLOCK (cfg, tblock);
8098 if (cfg->method == method) {
8099 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8100 if (breakpoint_id) {
8101 MONO_INST_NEW (cfg, ins, OP_BREAK);
8102 MONO_ADD_INS (cfg->cbb, ins);
8106 /* we use a separate basic block for the initialization code */
8107 NEW_BBLOCK (cfg, init_localsbb);
8108 cfg->bb_init = init_localsbb;
8109 init_localsbb->real_offset = cfg->real_offset;
8110 start_bblock->next_bb = init_localsbb;
8111 init_localsbb->next_bb = cfg->cbb;
8112 link_bblock (cfg, start_bblock, init_localsbb);
8113 link_bblock (cfg, init_localsbb, cfg->cbb);
8115 cfg->cbb = init_localsbb;
8117 if (cfg->gsharedvt && cfg->method == method) {
8118 MonoGSharedVtMethodInfo *info;
8119 MonoInst *var, *locals_var;
8122 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8123 info->method = cfg->method;
8124 info->count_entries = 16;
8125 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8126 cfg->gsharedvt_info = info;
8128 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8129 /* prevent it from being register allocated */
8130 //var->flags |= MONO_INST_VOLATILE;
8131 cfg->gsharedvt_info_var = var;
8133 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8134 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8136 /* Allocate locals */
8137 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8138 /* prevent it from being register allocated */
8139 //locals_var->flags |= MONO_INST_VOLATILE;
8140 cfg->gsharedvt_locals_var = locals_var;
8142 dreg = alloc_ireg (cfg);
8143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8145 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8146 ins->dreg = locals_var->dreg;
8148 MONO_ADD_INS (cfg->cbb, ins);
8149 cfg->gsharedvt_locals_var_ins = ins;
8151 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8154 ins->flags |= MONO_INST_INIT;
8158 if (mono_security_core_clr_enabled ()) {
8159 /* check if this is native code, e.g. an icall or a p/invoke */
8160 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8161 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8163 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8164 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8166 /* if this ia a native call then it can only be JITted from platform code */
8167 if ((icall || pinvk) && method->klass && method->klass->image) {
8168 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8169 MonoException *ex = icall ? mono_get_exception_security () :
8170 mono_get_exception_method_access ();
8171 emit_throw_exception (cfg, ex);
8178 CHECK_CFG_EXCEPTION;
8180 if (header->code_size == 0)
8183 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8188 if (cfg->method == method)
8189 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8191 for (n = 0; n < header->num_locals; ++n) {
8192 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8197 /* We force the vtable variable here for all shared methods
8198 for the possibility that they might show up in a stack
8199 trace where their exact instantiation is needed. */
8200 if (cfg->gshared && method == cfg->method) {
8201 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8202 mini_method_get_context (method)->method_inst ||
8203 method->klass->valuetype) {
8204 mono_get_vtable_var (cfg);
8206 /* FIXME: Is there a better way to do this?
8207 We need the variable live for the duration
8208 of the whole method. */
8209 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8213 /* add a check for this != NULL to inlined methods */
8214 if (is_virtual_call) {
8217 NEW_ARGLOAD (cfg, arg_ins, 0);
8218 MONO_ADD_INS (cfg->cbb, arg_ins);
8219 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8222 skip_dead_blocks = !dont_verify;
8223 if (skip_dead_blocks) {
8224 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8229 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8230 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8233 start_new_bblock = 0;
8235 if (cfg->method == method)
8236 cfg->real_offset = ip - header->code;
8238 cfg->real_offset = inline_offset;
8243 if (start_new_bblock) {
8244 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8245 if (start_new_bblock == 2) {
8246 g_assert (ip == tblock->cil_code);
8248 GET_BBLOCK (cfg, tblock, ip);
8250 cfg->cbb->next_bb = tblock;
8252 start_new_bblock = 0;
8253 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8254 if (cfg->verbose_level > 3)
8255 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8256 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8260 g_slist_free (class_inits);
8263 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8264 link_bblock (cfg, cfg->cbb, tblock);
8265 if (sp != stack_start) {
8266 handle_stack_args (cfg, stack_start, sp - stack_start);
8268 CHECK_UNVERIFIABLE (cfg);
8270 cfg->cbb->next_bb = tblock;
8272 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8273 if (cfg->verbose_level > 3)
8274 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8275 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8278 g_slist_free (class_inits);
8283 if (skip_dead_blocks) {
8284 int ip_offset = ip - header->code;
8286 if (ip_offset == bb->end)
8290 int op_size = mono_opcode_size (ip, end);
8291 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8293 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8295 if (ip_offset + op_size == bb->end) {
8296 MONO_INST_NEW (cfg, ins, OP_NOP);
8297 MONO_ADD_INS (cfg->cbb, ins);
8298 start_new_bblock = 1;
8306 * Sequence points are points where the debugger can place a breakpoint.
8307 * Currently, we generate these automatically at points where the IL
8310 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8312 * Make methods interruptable at the beginning, and at the targets of
8313 * backward branches.
8314 * Also, do this at the start of every bblock in methods with clauses too,
8315 * to be able to handle instructions with inprecise control flow like
8317 * Backward branches are handled at the end of method-to-ir ().
8319 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8320 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8322 /* Avoid sequence points on empty IL like .volatile */
8323 // FIXME: Enable this
8324 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8325 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8326 if ((sp != stack_start) && !sym_seq_point)
8327 ins->flags |= MONO_INST_NONEMPTY_STACK;
8328 MONO_ADD_INS (cfg->cbb, ins);
8331 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8334 cfg->cbb->real_offset = cfg->real_offset;
8336 if ((cfg->method == method) && cfg->coverage_info) {
8337 guint32 cil_offset = ip - header->code;
8338 cfg->coverage_info->data [cil_offset].cil_code = ip;
8340 /* TODO: Use an increment here */
8341 #if defined(TARGET_X86)
8342 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8343 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8345 MONO_ADD_INS (cfg->cbb, ins);
8347 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8348 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8352 if (cfg->verbose_level > 3)
8353 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8357 if (seq_points && !sym_seq_points && sp != stack_start) {
8359 * The C# compiler uses these nops to notify the JIT that it should
8360 * insert seq points.
8362 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8363 MONO_ADD_INS (cfg->cbb, ins);
8365 if (cfg->keep_cil_nops)
8366 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8368 MONO_INST_NEW (cfg, ins, OP_NOP);
8370 MONO_ADD_INS (cfg->cbb, ins);
8373 if (should_insert_brekpoint (cfg->method)) {
8374 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8376 MONO_INST_NEW (cfg, ins, OP_NOP);
8379 MONO_ADD_INS (cfg->cbb, ins);
8385 CHECK_STACK_OVF (1);
8386 n = (*ip)-CEE_LDARG_0;
8388 EMIT_NEW_ARGLOAD (cfg, ins, n);
8396 CHECK_STACK_OVF (1);
8397 n = (*ip)-CEE_LDLOC_0;
8399 EMIT_NEW_LOCLOAD (cfg, ins, n);
8408 n = (*ip)-CEE_STLOC_0;
8411 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8413 emit_stloc_ir (cfg, sp, header, n);
8420 CHECK_STACK_OVF (1);
8423 EMIT_NEW_ARGLOAD (cfg, ins, n);
8429 CHECK_STACK_OVF (1);
8432 NEW_ARGLOADA (cfg, ins, n);
8433 MONO_ADD_INS (cfg->cbb, ins);
8443 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8445 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8450 CHECK_STACK_OVF (1);
8453 EMIT_NEW_LOCLOAD (cfg, ins, n);
8457 case CEE_LDLOCA_S: {
8458 unsigned char *tmp_ip;
8460 CHECK_STACK_OVF (1);
8461 CHECK_LOCAL (ip [1]);
8463 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8469 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8478 CHECK_LOCAL (ip [1]);
8479 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8481 emit_stloc_ir (cfg, sp, header, ip [1]);
8486 CHECK_STACK_OVF (1);
8487 EMIT_NEW_PCONST (cfg, ins, NULL);
8488 ins->type = STACK_OBJ;
8493 CHECK_STACK_OVF (1);
8494 EMIT_NEW_ICONST (cfg, ins, -1);
8507 CHECK_STACK_OVF (1);
8508 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8514 CHECK_STACK_OVF (1);
8516 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8522 CHECK_STACK_OVF (1);
8523 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8529 CHECK_STACK_OVF (1);
8530 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8531 ins->type = STACK_I8;
8532 ins->dreg = alloc_dreg (cfg, STACK_I8);
8534 ins->inst_l = (gint64)read64 (ip);
8535 MONO_ADD_INS (cfg->cbb, ins);
8541 gboolean use_aotconst = FALSE;
8543 #ifdef TARGET_POWERPC
8544 /* FIXME: Clean this up */
8545 if (cfg->compile_aot)
8546 use_aotconst = TRUE;
8549 /* FIXME: we should really allocate this only late in the compilation process */
8550 f = mono_domain_alloc (cfg->domain, sizeof (float));
8552 CHECK_STACK_OVF (1);
8558 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8560 dreg = alloc_freg (cfg);
8561 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8562 ins->type = cfg->r4_stack_type;
8564 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8565 ins->type = cfg->r4_stack_type;
8566 ins->dreg = alloc_dreg (cfg, STACK_R8);
8568 MONO_ADD_INS (cfg->cbb, ins);
8578 gboolean use_aotconst = FALSE;
8580 #ifdef TARGET_POWERPC
8581 /* FIXME: Clean this up */
8582 if (cfg->compile_aot)
8583 use_aotconst = TRUE;
8586 /* FIXME: we should really allocate this only late in the compilation process */
8587 d = mono_domain_alloc (cfg->domain, sizeof (double));
8589 CHECK_STACK_OVF (1);
8595 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8597 dreg = alloc_freg (cfg);
8598 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8599 ins->type = STACK_R8;
8601 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8602 ins->type = STACK_R8;
8603 ins->dreg = alloc_dreg (cfg, STACK_R8);
8605 MONO_ADD_INS (cfg->cbb, ins);
8614 MonoInst *temp, *store;
8616 CHECK_STACK_OVF (1);
8620 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8621 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8623 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8626 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8639 if (sp [0]->type == STACK_R8)
8640 /* we need to pop the value from the x86 FP stack */
8641 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8646 MonoMethodSignature *fsig;
8649 INLINE_FAILURE ("jmp");
8650 GSHAREDVT_FAILURE (*ip);
8653 if (stack_start != sp)
8655 token = read32 (ip + 1);
8656 /* FIXME: check the signature matches */
8657 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8659 if (!cmethod || mono_loader_get_last_error ())
8662 if (cfg->gshared && mono_method_check_context_used (cmethod))
8663 GENERIC_SHARING_FAILURE (CEE_JMP);
8665 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8667 fsig = mono_method_signature (cmethod);
8668 n = fsig->param_count + fsig->hasthis;
8669 if (cfg->llvm_only) {
8672 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8673 for (i = 0; i < n; ++i)
8674 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8675 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8677 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8678 * have to emit a normal return since llvm expects it.
8681 emit_setret (cfg, ins);
8682 MONO_INST_NEW (cfg, ins, OP_BR);
8683 ins->inst_target_bb = end_bblock;
8684 MONO_ADD_INS (cfg->cbb, ins);
8685 link_bblock (cfg, cfg->cbb, end_bblock);
8688 } else if (cfg->backend->have_op_tail_call) {
8689 /* Handle tail calls similarly to calls */
8692 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8693 call->method = cmethod;
8694 call->tail_call = TRUE;
8695 call->signature = mono_method_signature (cmethod);
8696 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8697 call->inst.inst_p0 = cmethod;
8698 for (i = 0; i < n; ++i)
8699 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8701 mono_arch_emit_call (cfg, call);
8702 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8703 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8705 for (i = 0; i < num_args; ++i)
8706 /* Prevent arguments from being optimized away */
8707 arg_array [i]->flags |= MONO_INST_VOLATILE;
8709 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8710 ins = (MonoInst*)call;
8711 ins->inst_p0 = cmethod;
8712 MONO_ADD_INS (cfg->cbb, ins);
8716 start_new_bblock = 1;
8721 MonoMethodSignature *fsig;
8724 token = read32 (ip + 1);
8728 //GSHAREDVT_FAILURE (*ip);
8733 fsig = mini_get_signature (method, token, generic_context);
8735 if (method->dynamic && fsig->pinvoke) {
8739 * This is a call through a function pointer using a pinvoke
8740 * signature. Have to create a wrapper and call that instead.
8741 * FIXME: This is very slow, need to create a wrapper at JIT time
8742 * instead based on the signature.
8744 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8745 EMIT_NEW_PCONST (cfg, args [1], fsig);
8747 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8750 n = fsig->param_count + fsig->hasthis;
8754 //g_assert (!virtual || fsig->hasthis);
8758 inline_costs += 10 * num_calls++;
8761 * Making generic calls out of gsharedvt methods.
8762 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8763 * patching gshared method addresses into a gsharedvt method.
8765 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8767 * We pass the address to the gsharedvt trampoline in the rgctx reg
8769 MonoInst *callee = addr;
8771 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8773 GSHAREDVT_FAILURE (*ip);
8775 addr = emit_get_rgctx_sig (cfg, context_used,
8776 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8777 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8781 /* Prevent inlining of methods with indirect calls */
8782 INLINE_FAILURE ("indirect call");
8784 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8789 * Instead of emitting an indirect call, emit a direct call
8790 * with the contents of the aotconst as the patch info.
8792 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8793 info_type = addr->inst_c1;
8794 info_data = addr->inst_p0;
8796 info_type = addr->inst_right->inst_c1;
8797 info_data = addr->inst_right->inst_left;
8800 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8801 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8806 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8810 /* End of call, INS should contain the result of the call, if any */
8812 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8814 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8817 CHECK_CFG_EXCEPTION;
8821 constrained_class = NULL;
8825 case CEE_CALLVIRT: {
8826 MonoInst *addr = NULL;
8827 MonoMethodSignature *fsig = NULL;
8829 int virtual = *ip == CEE_CALLVIRT;
8830 gboolean pass_imt_from_rgctx = FALSE;
8831 MonoInst *imt_arg = NULL;
8832 MonoInst *keep_this_alive = NULL;
8833 gboolean pass_vtable = FALSE;
8834 gboolean pass_mrgctx = FALSE;
8835 MonoInst *vtable_arg = NULL;
8836 gboolean check_this = FALSE;
8837 gboolean supported_tail_call = FALSE;
8838 gboolean tail_call = FALSE;
8839 gboolean need_seq_point = FALSE;
8840 guint32 call_opcode = *ip;
8841 gboolean emit_widen = TRUE;
8842 gboolean push_res = TRUE;
8843 gboolean skip_ret = FALSE;
8844 gboolean delegate_invoke = FALSE;
8845 gboolean direct_icall = FALSE;
8846 gboolean constrained_partial_call = FALSE;
8847 MonoMethod *cil_method;
8850 token = read32 (ip + 1);
8854 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8855 cil_method = cmethod;
8857 if (constrained_class) {
8858 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8859 if (!mini_is_gsharedvt_klass (constrained_class)) {
8860 g_assert (!cmethod->klass->valuetype);
8861 if (!mini_type_is_reference (&constrained_class->byval_arg))
8862 constrained_partial_call = TRUE;
8866 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8867 if (cfg->verbose_level > 2)
8868 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8869 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8870 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8872 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8876 if (cfg->verbose_level > 2)
8877 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8879 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8881 * This is needed since get_method_constrained can't find
8882 * the method in klass representing a type var.
8883 * The type var is guaranteed to be a reference type in this
8886 if (!mini_is_gsharedvt_klass (constrained_class))
8887 g_assert (!cmethod->klass->valuetype);
8889 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8895 if (!cmethod || mono_loader_get_last_error ())
8897 if (!dont_verify && !cfg->skip_visibility) {
8898 MonoMethod *target_method = cil_method;
8899 if (method->is_inflated) {
8900 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8902 if (!mono_method_can_access_method (method_definition, target_method) &&
8903 !mono_method_can_access_method (method, cil_method))
8904 METHOD_ACCESS_FAILURE (method, cil_method);
8907 if (mono_security_core_clr_enabled ())
8908 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8910 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8911 /* MS.NET seems to silently convert this to a callvirt */
8916 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8917 * converts to a callvirt.
8919 * tests/bug-515884.il is an example of this behavior
8921 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8922 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8923 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8927 if (!cmethod->klass->inited)
8928 if (!mono_class_init (cmethod->klass))
8929 TYPE_LOAD_ERROR (cmethod->klass);
8931 fsig = mono_method_signature (cmethod);
8934 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8935 mini_class_is_system_array (cmethod->klass)) {
8936 array_rank = cmethod->klass->rank;
8937 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8938 direct_icall = TRUE;
8939 } else if (fsig->pinvoke) {
8940 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8941 fsig = mono_method_signature (wrapper);
8942 } else if (constrained_class) {
8944 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8948 /* See code below */
8949 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8950 MonoBasicBlock *tbb;
8952 GET_BBLOCK (cfg, tbb, ip + 5);
8953 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8955 * We want to extend the try block to cover the call, but we can't do it if the
8956 * call is made directly since it's followed by an exception check.
8958 direct_icall = FALSE;
8962 mono_save_token_info (cfg, image, token, cil_method);
8964 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8965 need_seq_point = TRUE;
8967 /* Don't support calls made using type arguments for now */
8969 if (cfg->gsharedvt) {
8970 if (mini_is_gsharedvt_signature (fsig))
8971 GSHAREDVT_FAILURE (*ip);
8975 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8976 g_assert_not_reached ();
8978 n = fsig->param_count + fsig->hasthis;
8980 if (!cfg->gshared && cmethod->klass->generic_container)
8984 g_assert (!mono_method_check_context_used (cmethod));
8988 //g_assert (!virtual || fsig->hasthis);
8992 if (constrained_class) {
8993 if (mini_is_gsharedvt_klass (constrained_class)) {
8994 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8995 /* The 'Own method' case below */
8996 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8997 /* 'The type parameter is instantiated as a reference type' case below. */
8999 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9000 CHECK_CFG_EXCEPTION;
9007 * We have the `constrained.' prefix opcode.
9009 if (constrained_partial_call) {
9010 gboolean need_box = TRUE;
9013 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9014 * called method is not known at compile time either. The called method could end up being
9015 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9016 * to box the receiver.
9017 * A simple solution would be to box always and make a normal virtual call, but that would
9018 * be bad performance wise.
9020 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9022 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9027 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9028 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9029 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9030 ins->klass = constrained_class;
9031 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9032 CHECK_CFG_EXCEPTION;
9033 } else if (need_box) {
9035 MonoBasicBlock *is_ref_bb, *end_bb;
9036 MonoInst *nonbox_call;
9039 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9041 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9042 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9044 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9046 NEW_BBLOCK (cfg, is_ref_bb);
9047 NEW_BBLOCK (cfg, end_bb);
9049 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9054 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9059 MONO_START_BB (cfg, is_ref_bb);
9060 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9061 ins->klass = constrained_class;
9062 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9063 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9067 MONO_START_BB (cfg, end_bb);
9070 nonbox_call->dreg = ins->dreg;
9073 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9074 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9075 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9078 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9080 * The type parameter is instantiated as a valuetype,
9081 * but that type doesn't override the method we're
9082 * calling, so we need to box `this'.
9084 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9085 ins->klass = constrained_class;
9086 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9087 CHECK_CFG_EXCEPTION;
9088 } else if (!constrained_class->valuetype) {
9089 int dreg = alloc_ireg_ref (cfg);
9092 * The type parameter is instantiated as a reference
9093 * type. We have a managed pointer on the stack, so
9094 * we need to dereference it here.
9096 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9097 ins->type = STACK_OBJ;
9100 if (cmethod->klass->valuetype) {
9103 /* Interface method */
9106 mono_class_setup_vtable (constrained_class);
9107 CHECK_TYPELOAD (constrained_class);
9108 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9110 TYPE_LOAD_ERROR (constrained_class);
9111 slot = mono_method_get_vtable_slot (cmethod);
9113 TYPE_LOAD_ERROR (cmethod->klass);
9114 cmethod = constrained_class->vtable [ioffset + slot];
9116 if (cmethod->klass == mono_defaults.enum_class) {
9117 /* Enum implements some interfaces, so treat this as the first case */
9118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9119 ins->klass = constrained_class;
9120 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9121 CHECK_CFG_EXCEPTION;
9126 constrained_class = NULL;
9129 if (check_call_signature (cfg, fsig, sp))
9132 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9133 delegate_invoke = TRUE;
9135 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9136 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9137 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9145 * If the callee is a shared method, then its static cctor
9146 * might not get called after the call was patched.
9148 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9149 emit_class_init (cfg, cmethod->klass);
9150 CHECK_TYPELOAD (cmethod->klass);
9153 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9156 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9158 context_used = mini_method_check_context_used (cfg, cmethod);
9160 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9161 /* Generic method interface
9162 calls are resolved via a
9163 helper function and don't
9165 if (!cmethod_context || !cmethod_context->method_inst)
9166 pass_imt_from_rgctx = TRUE;
9170 * If a shared method calls another
9171 * shared method then the caller must
9172 * have a generic sharing context
9173 * because the magic trampoline
9174 * requires it. FIXME: We shouldn't
9175 * have to force the vtable/mrgctx
9176 * variable here. Instead there
9177 * should be a flag in the cfg to
9178 * request a generic sharing context.
9181 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9182 mono_get_vtable_var (cfg);
9187 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9189 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9191 CHECK_TYPELOAD (cmethod->klass);
9192 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9197 g_assert (!vtable_arg);
9199 if (!cfg->compile_aot) {
9201 * emit_get_rgctx_method () calls mono_class_vtable () so check
9202 * for type load errors before.
9204 mono_class_setup_vtable (cmethod->klass);
9205 CHECK_TYPELOAD (cmethod->klass);
9208 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9210 /* !marshalbyref is needed to properly handle generic methods + remoting */
9211 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9212 MONO_METHOD_IS_FINAL (cmethod)) &&
9213 !mono_class_is_marshalbyref (cmethod->klass)) {
9220 if (pass_imt_from_rgctx) {
9221 g_assert (!pass_vtable);
9223 imt_arg = emit_get_rgctx_method (cfg, context_used,
9224 cmethod, MONO_RGCTX_INFO_METHOD);
9228 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9230 /* Calling virtual generic methods */
9231 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9232 !(MONO_METHOD_IS_FINAL (cmethod) &&
9233 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9234 fsig->generic_param_count &&
9235 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9237 MonoInst *this_temp, *this_arg_temp, *store;
9238 MonoInst *iargs [4];
9240 g_assert (fsig->is_inflated);
9242 /* Prevent inlining of methods that contain indirect calls */
9243 INLINE_FAILURE ("virtual generic call");
9245 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9246 GSHAREDVT_FAILURE (*ip);
9248 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9249 g_assert (!imt_arg);
9251 g_assert (cmethod->is_inflated);
9252 imt_arg = emit_get_rgctx_method (cfg, context_used,
9253 cmethod, MONO_RGCTX_INFO_METHOD);
9254 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9256 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9257 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9258 MONO_ADD_INS (cfg->cbb, store);
9260 /* FIXME: This should be a managed pointer */
9261 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9263 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9264 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9265 cmethod, MONO_RGCTX_INFO_METHOD);
9266 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9267 addr = mono_emit_jit_icall (cfg,
9268 mono_helper_compile_generic_method, iargs);
9270 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9272 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9279 * Implement a workaround for the inherent races involved in locking:
9285 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9286 * try block, the Exit () won't be executed, see:
9287 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9288 * To work around this, we extend such try blocks to include the last x bytes
9289 * of the Monitor.Enter () call.
9291 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9292 MonoBasicBlock *tbb;
9294 GET_BBLOCK (cfg, tbb, ip + 5);
9296 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9297 * from Monitor.Enter like ArgumentNullException.
9299 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9300 /* Mark this bblock as needing to be extended */
9301 tbb->extend_try_block = TRUE;
9305 /* Conversion to a JIT intrinsic */
9306 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9307 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9308 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9315 if ((cfg->opt & MONO_OPT_INLINE) &&
9316 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9317 mono_method_check_inlining (cfg, cmethod)) {
9319 gboolean always = FALSE;
9321 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9322 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9323 /* Prevent inlining of methods that call wrappers */
9324 INLINE_FAILURE ("wrapper call");
9325 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9329 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9331 cfg->real_offset += 5;
9333 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9334 /* *sp is already set by inline_method */
9339 inline_costs += costs;
9345 /* Tail recursion elimination */
9346 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9347 gboolean has_vtargs = FALSE;
9350 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9351 INLINE_FAILURE ("tail call");
9353 /* keep it simple */
9354 for (i = fsig->param_count - 1; i >= 0; i--) {
9355 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9360 for (i = 0; i < n; ++i)
9361 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9362 MONO_INST_NEW (cfg, ins, OP_BR);
9363 MONO_ADD_INS (cfg->cbb, ins);
9364 tblock = start_bblock->out_bb [0];
9365 link_bblock (cfg, cfg->cbb, tblock);
9366 ins->inst_target_bb = tblock;
9367 start_new_bblock = 1;
9369 /* skip the CEE_RET, too */
9370 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9377 inline_costs += 10 * num_calls++;
9380 * Making generic calls out of gsharedvt methods.
9381 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9382 * patching gshared method addresses into a gsharedvt method.
9384 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9385 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9386 MonoRgctxInfoType info_type;
9389 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9390 //GSHAREDVT_FAILURE (*ip);
9391 // disable for possible remoting calls
9392 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9393 GSHAREDVT_FAILURE (*ip);
9394 if (fsig->generic_param_count) {
9395 /* virtual generic call */
9396 g_assert (!imt_arg);
9397 /* Same as the virtual generic case above */
9398 imt_arg = emit_get_rgctx_method (cfg, context_used,
9399 cmethod, MONO_RGCTX_INFO_METHOD);
9400 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9402 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9403 /* This can happen when we call a fully instantiated iface method */
9404 imt_arg = emit_get_rgctx_method (cfg, context_used,
9405 cmethod, MONO_RGCTX_INFO_METHOD);
9410 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9411 keep_this_alive = sp [0];
9413 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9414 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9416 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9417 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9419 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9423 /* Generic sharing */
9426 * Use this if the callee is gsharedvt sharable too, since
9427 * at runtime we might find an instantiation so the call cannot
9428 * be patched (the 'no_patch' code path in mini-trampolines.c).
9430 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9431 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9432 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9433 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9434 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9435 INLINE_FAILURE ("gshared");
9437 g_assert (cfg->gshared && cmethod);
9441 * We are compiling a call to a
9442 * generic method from shared code,
9443 * which means that we have to look up
9444 * the method in the rgctx and do an
9448 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9450 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9451 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9455 /* Direct calls to icalls */
9457 MonoMethod *wrapper;
9460 /* Inline the wrapper */
9461 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9463 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9464 g_assert (costs > 0);
9465 cfg->real_offset += 5;
9467 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9468 /* *sp is already set by inline_method */
9473 inline_costs += costs;
9482 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9483 MonoInst *val = sp [fsig->param_count];
9485 if (val->type == STACK_OBJ) {
9486 MonoInst *iargs [2];
9491 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9494 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9495 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9496 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9497 emit_write_barrier (cfg, addr, val);
9498 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9499 GSHAREDVT_FAILURE (*ip);
9500 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9501 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9503 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9504 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9505 if (!cmethod->klass->element_class->valuetype && !readonly)
9506 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9507 CHECK_TYPELOAD (cmethod->klass);
9510 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9513 g_assert_not_reached ();
9520 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9524 /* Tail prefix / tail call optimization */
9526 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9527 /* FIXME: runtime generic context pointer for jumps? */
9528 /* FIXME: handle this for generic sharing eventually */
9529 if ((ins_flag & MONO_INST_TAILCALL) &&
9530 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9531 supported_tail_call = TRUE;
9533 if (supported_tail_call) {
9536 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9537 INLINE_FAILURE ("tail call");
9539 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9541 if (cfg->backend->have_op_tail_call) {
9542 /* Handle tail calls similarly to normal calls */
9545 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9547 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9548 call->tail_call = TRUE;
9549 call->method = cmethod;
9550 call->signature = mono_method_signature (cmethod);
9553 * We implement tail calls by storing the actual arguments into the
9554 * argument variables, then emitting a CEE_JMP.
9556 for (i = 0; i < n; ++i) {
9557 /* Prevent argument from being register allocated */
9558 arg_array [i]->flags |= MONO_INST_VOLATILE;
9559 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9561 ins = (MonoInst*)call;
9562 ins->inst_p0 = cmethod;
9563 ins->inst_p1 = arg_array [0];
9564 MONO_ADD_INS (cfg->cbb, ins);
9565 link_bblock (cfg, cfg->cbb, end_bblock);
9566 start_new_bblock = 1;
9568 // FIXME: Eliminate unreachable epilogs
9571 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9572 * only reachable from this call.
9574 GET_BBLOCK (cfg, tblock, ip + 5);
9575 if (tblock == cfg->cbb || tblock->in_count == 0)
9584 * Synchronized wrappers.
9585 * It's hard to determine where to replace a method with its synchronized
9586 * wrapper without causing an infinite recursion. The current solution is
9587 * to add the synchronized wrapper in the trampolines, and to
9588 * change the called method to a dummy wrapper, and resolve that wrapper
9589 * to the real method in mono_jit_compile_method ().
9591 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9592 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9593 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9594 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9598 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9599 * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
9600 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9601 * based on whether there is an rgctx or not.
9603 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9604 MonoInst *args_buf [16], *icall_args [16];
9606 MonoBasicBlock *rgctx_bb, *end_bb;
9607 MonoInst *call1, *call2, *call_target;
9608 MonoMethodSignature *rgctx_sig;
9609 int rgctx_reg, tmp_reg;
9611 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9613 NEW_BBLOCK (cfg, rgctx_bb);
9614 NEW_BBLOCK (cfg, end_bb);
9616 // FIXME: Optimize this
9618 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9620 icall_args [0] = sp [0];
9621 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9623 icall_args [2] = imt_arg;
9625 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9626 icall_args [2] = ins;
9629 rgctx_reg = alloc_preg (cfg);
9630 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9631 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9632 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9634 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9636 // FIXME: Only do this if needed (generic calls)
9638 // Check whether to pass an rgctx
9639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9641 /* Non rgctx case */
9642 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9645 MONO_START_BB (cfg, rgctx_bb);
9646 /* Make a call with an rgctx */
9647 if (fsig->param_count + 2 < 16)
9650 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9652 for (i = 0; i < fsig->param_count; ++i)
9653 args [i + 1] = sp [i + 1];
9654 tmp_reg = alloc_preg (cfg);
9655 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9656 rgctx_sig = sig_to_rgctx_sig (fsig);
9657 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9658 call2->dreg = call1->dreg;
9659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9661 MONO_START_BB (cfg, end_bb);
9667 INLINE_FAILURE ("call");
9668 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9669 imt_arg, vtable_arg);
9671 if (tail_call && !cfg->llvm_only) {
9672 link_bblock (cfg, cfg->cbb, end_bblock);
9673 start_new_bblock = 1;
9675 // FIXME: Eliminate unreachable epilogs
9678 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9679 * only reachable from this call.
9681 GET_BBLOCK (cfg, tblock, ip + 5);
9682 if (tblock == cfg->cbb || tblock->in_count == 0)
9689 /* End of call, INS should contain the result of the call, if any */
9691 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9694 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9699 if (keep_this_alive) {
9700 MonoInst *dummy_use;
9702 /* See mono_emit_method_call_full () */
9703 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9706 CHECK_CFG_EXCEPTION;
9710 g_assert (*ip == CEE_RET);
9714 constrained_class = NULL;
9716 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9720 if (cfg->method != method) {
9721 /* return from inlined method */
9723 * If in_count == 0, that means the ret is unreachable due to
9724 * being preceded by a throw. In that case, inline_method () will
9725 * handle setting the return value
9726 * (test case: test_0_inline_throw ()).
9728 if (return_var && cfg->cbb->in_count) {
9729 MonoType *ret_type = mono_method_signature (method)->ret;
9735 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9738 //g_assert (returnvar != -1);
9739 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9740 cfg->ret_var_set = TRUE;
9743 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9745 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9749 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9751 if (seq_points && !sym_seq_points) {
9753 * Place a seq point here too even through the IL stack is not
9754 * empty, so a step over on
9757 * will work correctly.
9759 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9760 MONO_ADD_INS (cfg->cbb, ins);
9763 g_assert (!return_var);
9767 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9770 emit_setret (cfg, *sp);
9773 if (sp != stack_start)
9775 MONO_INST_NEW (cfg, ins, OP_BR);
9777 ins->inst_target_bb = end_bblock;
9778 MONO_ADD_INS (cfg->cbb, ins);
9779 link_bblock (cfg, cfg->cbb, end_bblock);
9780 start_new_bblock = 1;
9784 MONO_INST_NEW (cfg, ins, OP_BR);
9786 target = ip + 1 + (signed char)(*ip);
9788 GET_BBLOCK (cfg, tblock, target);
9789 link_bblock (cfg, cfg->cbb, tblock);
9790 ins->inst_target_bb = tblock;
9791 if (sp != stack_start) {
9792 handle_stack_args (cfg, stack_start, sp - stack_start);
9794 CHECK_UNVERIFIABLE (cfg);
9796 MONO_ADD_INS (cfg->cbb, ins);
9797 start_new_bblock = 1;
9798 inline_costs += BRANCH_COST;
9812 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9814 target = ip + 1 + *(signed char*)ip;
9820 inline_costs += BRANCH_COST;
9824 MONO_INST_NEW (cfg, ins, OP_BR);
9827 target = ip + 4 + (gint32)read32(ip);
9829 GET_BBLOCK (cfg, tblock, target);
9830 link_bblock (cfg, cfg->cbb, tblock);
9831 ins->inst_target_bb = tblock;
9832 if (sp != stack_start) {
9833 handle_stack_args (cfg, stack_start, sp - stack_start);
9835 CHECK_UNVERIFIABLE (cfg);
9838 MONO_ADD_INS (cfg->cbb, ins);
9840 start_new_bblock = 1;
9841 inline_costs += BRANCH_COST;
9848 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9849 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9850 guint32 opsize = is_short ? 1 : 4;
9852 CHECK_OPSIZE (opsize);
9854 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9857 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9862 GET_BBLOCK (cfg, tblock, target);
9863 link_bblock (cfg, cfg->cbb, tblock);
9864 GET_BBLOCK (cfg, tblock, ip);
9865 link_bblock (cfg, cfg->cbb, tblock);
9867 if (sp != stack_start) {
9868 handle_stack_args (cfg, stack_start, sp - stack_start);
9869 CHECK_UNVERIFIABLE (cfg);
9872 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9873 cmp->sreg1 = sp [0]->dreg;
9874 type_from_op (cfg, cmp, sp [0], NULL);
9877 #if SIZEOF_REGISTER == 4
9878 if (cmp->opcode == OP_LCOMPARE_IMM) {
9879 /* Convert it to OP_LCOMPARE */
9880 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9881 ins->type = STACK_I8;
9882 ins->dreg = alloc_dreg (cfg, STACK_I8);
9884 MONO_ADD_INS (cfg->cbb, ins);
9885 cmp->opcode = OP_LCOMPARE;
9886 cmp->sreg2 = ins->dreg;
9889 MONO_ADD_INS (cfg->cbb, cmp);
9891 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9892 type_from_op (cfg, ins, sp [0], NULL);
9893 MONO_ADD_INS (cfg->cbb, ins);
9894 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9895 GET_BBLOCK (cfg, tblock, target);
9896 ins->inst_true_bb = tblock;
9897 GET_BBLOCK (cfg, tblock, ip);
9898 ins->inst_false_bb = tblock;
9899 start_new_bblock = 2;
9902 inline_costs += BRANCH_COST;
9917 MONO_INST_NEW (cfg, ins, *ip);
9919 target = ip + 4 + (gint32)read32(ip);
9925 inline_costs += BRANCH_COST;
9929 MonoBasicBlock **targets;
9930 MonoBasicBlock *default_bblock;
9931 MonoJumpInfoBBTable *table;
9932 int offset_reg = alloc_preg (cfg);
9933 int target_reg = alloc_preg (cfg);
9934 int table_reg = alloc_preg (cfg);
9935 int sum_reg = alloc_preg (cfg);
9936 gboolean use_op_switch;
9940 n = read32 (ip + 1);
9943 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9947 CHECK_OPSIZE (n * sizeof (guint32));
9948 target = ip + n * sizeof (guint32);
9950 GET_BBLOCK (cfg, default_bblock, target);
9951 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9953 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9954 for (i = 0; i < n; ++i) {
9955 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9956 targets [i] = tblock;
9957 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9961 if (sp != stack_start) {
9963 * Link the current bb with the targets as well, so handle_stack_args
9964 * will set their in_stack correctly.
9966 link_bblock (cfg, cfg->cbb, default_bblock);
9967 for (i = 0; i < n; ++i)
9968 link_bblock (cfg, cfg->cbb, targets [i]);
9970 handle_stack_args (cfg, stack_start, sp - stack_start);
9972 CHECK_UNVERIFIABLE (cfg);
9974 /* Undo the links */
9975 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9976 for (i = 0; i < n; ++i)
9977 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9983 for (i = 0; i < n; ++i)
9984 link_bblock (cfg, cfg->cbb, targets [i]);
9986 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9987 table->table = targets;
9988 table->table_size = n;
9990 use_op_switch = FALSE;
9992 /* ARM implements SWITCH statements differently */
9993 /* FIXME: Make it use the generic implementation */
9994 if (!cfg->compile_aot)
9995 use_op_switch = TRUE;
9998 if (COMPILE_LLVM (cfg))
9999 use_op_switch = TRUE;
10001 cfg->cbb->has_jump_table = 1;
10003 if (use_op_switch) {
10004 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10005 ins->sreg1 = src1->dreg;
10006 ins->inst_p0 = table;
10007 ins->inst_many_bb = targets;
10008 ins->klass = GUINT_TO_POINTER (n);
10009 MONO_ADD_INS (cfg->cbb, ins);
10011 if (sizeof (gpointer) == 8)
10012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10016 #if SIZEOF_REGISTER == 8
10017 /* The upper word might not be zero, and we add it to a 64 bit address later */
10018 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10021 if (cfg->compile_aot) {
10022 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10024 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10025 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10026 ins->inst_p0 = table;
10027 ins->dreg = table_reg;
10028 MONO_ADD_INS (cfg->cbb, ins);
10031 /* FIXME: Use load_memindex */
10032 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10034 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10036 start_new_bblock = 1;
10037 inline_costs += (BRANCH_COST * 2);
10050 case CEE_LDIND_REF:
10057 dreg = alloc_freg (cfg);
10060 dreg = alloc_lreg (cfg);
10062 case CEE_LDIND_REF:
10063 dreg = alloc_ireg_ref (cfg);
10066 dreg = alloc_preg (cfg);
10069 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10070 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10071 if (*ip == CEE_LDIND_R4)
10072 ins->type = cfg->r4_stack_type;
10073 ins->flags |= ins_flag;
10074 MONO_ADD_INS (cfg->cbb, ins);
10076 if (ins_flag & MONO_INST_VOLATILE) {
10077 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10078 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10083 case CEE_STIND_REF:
10094 if (ins_flag & MONO_INST_VOLATILE) {
10095 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10096 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10099 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10100 ins->flags |= ins_flag;
10103 MONO_ADD_INS (cfg->cbb, ins);
10105 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10106 emit_write_barrier (cfg, sp [0], sp [1]);
10115 MONO_INST_NEW (cfg, ins, (*ip));
10117 ins->sreg1 = sp [0]->dreg;
10118 ins->sreg2 = sp [1]->dreg;
10119 type_from_op (cfg, ins, sp [0], sp [1]);
10121 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10123 /* Use the immediate opcodes if possible */
10124 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10125 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10126 if (imm_opcode != -1) {
10127 ins->opcode = imm_opcode;
10128 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10131 NULLIFY_INS (sp [1]);
10135 MONO_ADD_INS ((cfg)->cbb, (ins));
10137 *sp++ = mono_decompose_opcode (cfg, ins);
10154 MONO_INST_NEW (cfg, ins, (*ip));
10156 ins->sreg1 = sp [0]->dreg;
10157 ins->sreg2 = sp [1]->dreg;
10158 type_from_op (cfg, ins, sp [0], sp [1]);
10160 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10161 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10163 /* FIXME: Pass opcode to is_inst_imm */
10165 /* Use the immediate opcodes if possible */
10166 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10169 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10170 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10171 /* Keep emulated opcodes which are optimized away later */
10172 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10173 imm_opcode = mono_op_to_op_imm (ins->opcode);
10176 if (imm_opcode != -1) {
10177 ins->opcode = imm_opcode;
10178 if (sp [1]->opcode == OP_I8CONST) {
10179 #if SIZEOF_REGISTER == 8
10180 ins->inst_imm = sp [1]->inst_l;
10182 ins->inst_ls_word = sp [1]->inst_ls_word;
10183 ins->inst_ms_word = sp [1]->inst_ms_word;
10187 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10190 /* Might be followed by an instruction added by add_widen_op */
10191 if (sp [1]->next == NULL)
10192 NULLIFY_INS (sp [1]);
10195 MONO_ADD_INS ((cfg)->cbb, (ins));
10197 *sp++ = mono_decompose_opcode (cfg, ins);
10210 case CEE_CONV_OVF_I8:
10211 case CEE_CONV_OVF_U8:
10212 case CEE_CONV_R_UN:
10215 /* Special case this earlier so we have long constants in the IR */
10216 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10217 int data = sp [-1]->inst_c0;
10218 sp [-1]->opcode = OP_I8CONST;
10219 sp [-1]->type = STACK_I8;
10220 #if SIZEOF_REGISTER == 8
10221 if ((*ip) == CEE_CONV_U8)
10222 sp [-1]->inst_c0 = (guint32)data;
10224 sp [-1]->inst_c0 = data;
10226 sp [-1]->inst_ls_word = data;
10227 if ((*ip) == CEE_CONV_U8)
10228 sp [-1]->inst_ms_word = 0;
10230 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10232 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10239 case CEE_CONV_OVF_I4:
10240 case CEE_CONV_OVF_I1:
10241 case CEE_CONV_OVF_I2:
10242 case CEE_CONV_OVF_I:
10243 case CEE_CONV_OVF_U:
10246 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10247 ADD_UNOP (CEE_CONV_OVF_I8);
10254 case CEE_CONV_OVF_U1:
10255 case CEE_CONV_OVF_U2:
10256 case CEE_CONV_OVF_U4:
10259 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10260 ADD_UNOP (CEE_CONV_OVF_U8);
10267 case CEE_CONV_OVF_I1_UN:
10268 case CEE_CONV_OVF_I2_UN:
10269 case CEE_CONV_OVF_I4_UN:
10270 case CEE_CONV_OVF_I8_UN:
10271 case CEE_CONV_OVF_U1_UN:
10272 case CEE_CONV_OVF_U2_UN:
10273 case CEE_CONV_OVF_U4_UN:
10274 case CEE_CONV_OVF_U8_UN:
10275 case CEE_CONV_OVF_I_UN:
10276 case CEE_CONV_OVF_U_UN:
10283 CHECK_CFG_EXCEPTION;
10287 case CEE_ADD_OVF_UN:
10289 case CEE_MUL_OVF_UN:
10291 case CEE_SUB_OVF_UN:
10297 GSHAREDVT_FAILURE (*ip);
10300 token = read32 (ip + 1);
10301 klass = mini_get_class (method, token, generic_context);
10302 CHECK_TYPELOAD (klass);
10304 if (generic_class_is_reference_type (cfg, klass)) {
10305 MonoInst *store, *load;
10306 int dreg = alloc_ireg_ref (cfg);
10308 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10309 load->flags |= ins_flag;
10310 MONO_ADD_INS (cfg->cbb, load);
10312 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10313 store->flags |= ins_flag;
10314 MONO_ADD_INS (cfg->cbb, store);
10316 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10317 emit_write_barrier (cfg, sp [0], sp [1]);
10319 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10325 int loc_index = -1;
10331 token = read32 (ip + 1);
10332 klass = mini_get_class (method, token, generic_context);
10333 CHECK_TYPELOAD (klass);
10335 /* Optimize the common ldobj+stloc combination */
10338 loc_index = ip [6];
10345 loc_index = ip [5] - CEE_STLOC_0;
10352 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10353 CHECK_LOCAL (loc_index);
10355 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10356 ins->dreg = cfg->locals [loc_index]->dreg;
10357 ins->flags |= ins_flag;
10360 if (ins_flag & MONO_INST_VOLATILE) {
10361 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10362 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10368 /* Optimize the ldobj+stobj combination */
10369 /* The reference case ends up being a load+store anyway */
10370 /* Skip this if the operation is volatile. */
10371 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10376 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10383 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10384 ins->flags |= ins_flag;
10387 if (ins_flag & MONO_INST_VOLATILE) {
10388 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10389 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10398 CHECK_STACK_OVF (1);
10400 n = read32 (ip + 1);
10402 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10403 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10404 ins->type = STACK_OBJ;
10407 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10408 MonoInst *iargs [1];
10409 char *str = mono_method_get_wrapper_data (method, n);
10411 if (cfg->compile_aot)
10412 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10414 EMIT_NEW_PCONST (cfg, iargs [0], str);
10415 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10417 if (cfg->opt & MONO_OPT_SHARED) {
10418 MonoInst *iargs [3];
10420 if (cfg->compile_aot) {
10421 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10423 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10424 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10425 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10426 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10427 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10429 if (cfg->cbb->out_of_line) {
10430 MonoInst *iargs [2];
10432 if (image == mono_defaults.corlib) {
10434 * Avoid relocations in AOT and save some space by using a
10435 * version of helper_ldstr specialized to mscorlib.
10437 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10438 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10440 /* Avoid creating the string object */
10441 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10442 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10443 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10447 if (cfg->compile_aot) {
10448 NEW_LDSTRCONST (cfg, ins, image, n);
10450 MONO_ADD_INS (cfg->cbb, ins);
10453 NEW_PCONST (cfg, ins, NULL);
10454 ins->type = STACK_OBJ;
10455 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10457 OUT_OF_MEMORY_FAILURE;
10460 MONO_ADD_INS (cfg->cbb, ins);
10469 MonoInst *iargs [2];
10470 MonoMethodSignature *fsig;
10473 MonoInst *vtable_arg = NULL;
10476 token = read32 (ip + 1);
10477 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10478 if (!cmethod || mono_loader_get_last_error ())
10480 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10483 mono_save_token_info (cfg, image, token, cmethod);
10485 if (!mono_class_init (cmethod->klass))
10486 TYPE_LOAD_ERROR (cmethod->klass);
10488 context_used = mini_method_check_context_used (cfg, cmethod);
10490 if (mono_security_core_clr_enabled ())
10491 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10493 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10494 emit_class_init (cfg, cmethod->klass);
10495 CHECK_TYPELOAD (cmethod->klass);
10499 if (cfg->gsharedvt) {
10500 if (mini_is_gsharedvt_variable_signature (sig))
10501 GSHAREDVT_FAILURE (*ip);
10505 n = fsig->param_count;
10509 * Generate smaller code for the common newobj <exception> instruction in
10510 * argument checking code.
10512 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10513 is_exception_class (cmethod->klass) && n <= 2 &&
10514 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10515 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10516 MonoInst *iargs [3];
10520 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10523 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10526 iargs [1] = sp [0];
10527 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10530 iargs [1] = sp [0];
10531 iargs [2] = sp [1];
10532 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10535 g_assert_not_reached ();
10543 /* move the args to allow room for 'this' in the first position */
10549 /* check_call_signature () requires sp[0] to be set */
10550 this_ins.type = STACK_OBJ;
10551 sp [0] = &this_ins;
10552 if (check_call_signature (cfg, fsig, sp))
10557 if (mini_class_is_system_array (cmethod->klass)) {
10558 *sp = emit_get_rgctx_method (cfg, context_used,
10559 cmethod, MONO_RGCTX_INFO_METHOD);
10561 /* Avoid varargs in the common case */
10562 if (fsig->param_count == 1)
10563 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10564 else if (fsig->param_count == 2)
10565 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10566 else if (fsig->param_count == 3)
10567 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10568 else if (fsig->param_count == 4)
10569 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10571 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10572 } else if (cmethod->string_ctor) {
10573 g_assert (!context_used);
10574 g_assert (!vtable_arg);
10575 /* we simply pass a null pointer */
10576 EMIT_NEW_PCONST (cfg, *sp, NULL);
10577 /* now call the string ctor */
10578 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10580 if (cmethod->klass->valuetype) {
10581 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10582 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10583 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10588 * The code generated by mini_emit_virtual_call () expects
10589 * iargs [0] to be a boxed instance, but luckily the vcall
10590 * will be transformed into a normal call there.
10592 } else if (context_used) {
10593 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10596 MonoVTable *vtable = NULL;
10598 if (!cfg->compile_aot)
10599 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10600 CHECK_TYPELOAD (cmethod->klass);
10603 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10604 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10605 * As a workaround, we call class cctors before allocating objects.
10607 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10608 emit_class_init (cfg, cmethod->klass);
10609 if (cfg->verbose_level > 2)
10610 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10611 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10614 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10617 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10620 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10622 /* Now call the actual ctor */
10623 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10624 CHECK_CFG_EXCEPTION;
10627 if (alloc == NULL) {
10629 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10630 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10638 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10639 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10642 case CEE_CASTCLASS:
10646 token = read32 (ip + 1);
10647 klass = mini_get_class (method, token, generic_context);
10648 CHECK_TYPELOAD (klass);
10649 if (sp [0]->type != STACK_OBJ)
10652 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10653 CHECK_CFG_EXCEPTION;
10662 token = read32 (ip + 1);
10663 klass = mini_get_class (method, token, generic_context);
10664 CHECK_TYPELOAD (klass);
10665 if (sp [0]->type != STACK_OBJ)
10668 context_used = mini_class_check_context_used (cfg, klass);
10670 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10671 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10672 MonoInst *args [3];
10679 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10682 if (cfg->compile_aot) {
10683 idx = get_castclass_cache_idx (cfg);
10684 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10686 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10689 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10692 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10693 MonoMethod *mono_isinst;
10694 MonoInst *iargs [1];
10697 mono_isinst = mono_marshal_get_isinst (klass);
10698 iargs [0] = sp [0];
10700 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10701 iargs, ip, cfg->real_offset, TRUE);
10702 CHECK_CFG_EXCEPTION;
10703 g_assert (costs > 0);
10706 cfg->real_offset += 5;
10710 inline_costs += costs;
10713 ins = handle_isinst (cfg, klass, *sp, context_used);
10714 CHECK_CFG_EXCEPTION;
10720 case CEE_UNBOX_ANY: {
10721 MonoInst *res, *addr;
10726 token = read32 (ip + 1);
10727 klass = mini_get_class (method, token, generic_context);
10728 CHECK_TYPELOAD (klass);
10730 mono_save_token_info (cfg, image, token, klass);
10732 context_used = mini_class_check_context_used (cfg, klass);
10734 if (mini_is_gsharedvt_klass (klass)) {
10735 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10737 } else if (generic_class_is_reference_type (cfg, klass)) {
10738 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10739 CHECK_CFG_EXCEPTION;
10740 } else if (mono_class_is_nullable (klass)) {
10741 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10743 addr = handle_unbox (cfg, klass, sp, context_used);
10745 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10756 MonoClass *enum_class;
10757 MonoMethod *has_flag;
10763 token = read32 (ip + 1);
10764 klass = mini_get_class (method, token, generic_context);
10765 CHECK_TYPELOAD (klass);
10767 mono_save_token_info (cfg, image, token, klass);
10769 context_used = mini_class_check_context_used (cfg, klass);
10771 if (generic_class_is_reference_type (cfg, klass)) {
10777 if (klass == mono_defaults.void_class)
10779 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10781 /* frequent check in generic code: box (struct), brtrue */
10786 * <push int/long ptr>
10789 * constrained. MyFlags
10790 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10792 * If we find this sequence and the operand types on box and constrained
10793 * are equal, we can emit a specialized instruction sequence instead of
10794 * the very slow HasFlag () call.
10796 if ((cfg->opt & MONO_OPT_INTRINS) &&
10797 /* Cheap checks first. */
10798 ip + 5 + 6 + 5 < end &&
10799 ip [5] == CEE_PREFIX1 &&
10800 ip [6] == CEE_CONSTRAINED_ &&
10801 ip [11] == CEE_CALLVIRT &&
10802 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10803 mono_class_is_enum (klass) &&
10804 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10805 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10806 has_flag->klass == mono_defaults.enum_class &&
10807 !strcmp (has_flag->name, "HasFlag") &&
10808 has_flag->signature->hasthis &&
10809 has_flag->signature->param_count == 1) {
10810 CHECK_TYPELOAD (enum_class);
10812 if (enum_class == klass) {
10813 MonoInst *enum_this, *enum_flag;
10818 enum_this = sp [0];
10819 enum_flag = sp [1];
10821 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10826 // FIXME: LLVM can't handle the inconsistent bb linking
10827 if (!mono_class_is_nullable (klass) &&
10828 !mini_is_gsharedvt_klass (klass) &&
10829 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10830 (ip [5] == CEE_BRTRUE ||
10831 ip [5] == CEE_BRTRUE_S ||
10832 ip [5] == CEE_BRFALSE ||
10833 ip [5] == CEE_BRFALSE_S)) {
10834 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10836 MonoBasicBlock *true_bb, *false_bb;
10840 if (cfg->verbose_level > 3) {
10841 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10842 printf ("<box+brtrue opt>\n");
10847 case CEE_BRFALSE_S:
10850 target = ip + 1 + (signed char)(*ip);
10857 target = ip + 4 + (gint)(read32 (ip));
10861 g_assert_not_reached ();
10865 * We need to link both bblocks, since it is needed for handling stack
10866 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10867 * Branching to only one of them would lead to inconsistencies, so
10868 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10870 GET_BBLOCK (cfg, true_bb, target);
10871 GET_BBLOCK (cfg, false_bb, ip);
10873 mono_link_bblock (cfg, cfg->cbb, true_bb);
10874 mono_link_bblock (cfg, cfg->cbb, false_bb);
10876 if (sp != stack_start) {
10877 handle_stack_args (cfg, stack_start, sp - stack_start);
10879 CHECK_UNVERIFIABLE (cfg);
10882 if (COMPILE_LLVM (cfg)) {
10883 dreg = alloc_ireg (cfg);
10884 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10887 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10889 /* The JIT can't eliminate the iconst+compare */
10890 MONO_INST_NEW (cfg, ins, OP_BR);
10891 ins->inst_target_bb = is_true ? true_bb : false_bb;
10892 MONO_ADD_INS (cfg->cbb, ins);
10895 start_new_bblock = 1;
10899 *sp++ = handle_box (cfg, val, klass, context_used);
10901 CHECK_CFG_EXCEPTION;
10910 token = read32 (ip + 1);
10911 klass = mini_get_class (method, token, generic_context);
10912 CHECK_TYPELOAD (klass);
10914 mono_save_token_info (cfg, image, token, klass);
10916 context_used = mini_class_check_context_used (cfg, klass);
10918 if (mono_class_is_nullable (klass)) {
10921 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10922 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10926 ins = handle_unbox (cfg, klass, sp, context_used);
10939 MonoClassField *field;
10940 #ifndef DISABLE_REMOTING
10944 gboolean is_instance;
10946 gpointer addr = NULL;
10947 gboolean is_special_static;
10949 MonoInst *store_val = NULL;
10950 MonoInst *thread_ins;
10953 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10955 if (op == CEE_STFLD) {
10958 store_val = sp [1];
10963 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10965 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10968 if (op == CEE_STSFLD) {
10971 store_val = sp [0];
10976 token = read32 (ip + 1);
10977 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10978 field = mono_method_get_wrapper_data (method, token);
10979 klass = field->parent;
10982 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10985 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10986 FIELD_ACCESS_FAILURE (method, field);
10987 mono_class_init (klass);
10989 /* if the class is Critical then transparent code cannot access it's fields */
10990 if (!is_instance && mono_security_core_clr_enabled ())
10991 ensure_method_is_allowed_to_access_field (cfg, method, field);
10993 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10994 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10995 if (mono_security_core_clr_enabled ())
10996 ensure_method_is_allowed_to_access_field (cfg, method, field);
10999 ftype = mono_field_get_type (field);
11002 * LDFLD etc. is usable on static fields as well, so convert those cases to
11005 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11017 g_assert_not_reached ();
11019 is_instance = FALSE;
11022 context_used = mini_class_check_context_used (cfg, klass);
11024 /* INSTANCE CASE */
11026 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11027 if (op == CEE_STFLD) {
11028 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11030 #ifndef DISABLE_REMOTING
11031 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11032 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11033 MonoInst *iargs [5];
11035 GSHAREDVT_FAILURE (op);
11037 iargs [0] = sp [0];
11038 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11039 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11040 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11042 iargs [4] = sp [1];
11044 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11045 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11046 iargs, ip, cfg->real_offset, TRUE);
11047 CHECK_CFG_EXCEPTION;
11048 g_assert (costs > 0);
11050 cfg->real_offset += 5;
11052 inline_costs += costs;
11054 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11061 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11063 if (mini_is_gsharedvt_klass (klass)) {
11064 MonoInst *offset_ins;
11066 context_used = mini_class_check_context_used (cfg, klass);
11068 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11069 dreg = alloc_ireg_mp (cfg);
11070 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11071 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11072 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11076 if (sp [0]->opcode != OP_LDADDR)
11077 store->flags |= MONO_INST_FAULT;
11079 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11080 /* insert call to write barrier */
11084 dreg = alloc_ireg_mp (cfg);
11085 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11086 emit_write_barrier (cfg, ptr, sp [1]);
11089 store->flags |= ins_flag;
11096 #ifndef DISABLE_REMOTING
11097 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11098 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11099 MonoInst *iargs [4];
11101 GSHAREDVT_FAILURE (op);
11103 iargs [0] = sp [0];
11104 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11105 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11106 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11107 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11108 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11109 iargs, ip, cfg->real_offset, TRUE);
11110 CHECK_CFG_EXCEPTION;
11111 g_assert (costs > 0);
11113 cfg->real_offset += 5;
11117 inline_costs += costs;
11119 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11125 if (sp [0]->type == STACK_VTYPE) {
11128 /* Have to compute the address of the variable */
11130 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11132 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11134 g_assert (var->klass == klass);
11136 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11140 if (op == CEE_LDFLDA) {
11141 if (sp [0]->type == STACK_OBJ) {
11142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11143 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11146 dreg = alloc_ireg_mp (cfg);
11148 if (mini_is_gsharedvt_klass (klass)) {
11149 MonoInst *offset_ins;
11151 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11152 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11154 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11156 ins->klass = mono_class_from_mono_type (field->type);
11157 ins->type = STACK_MP;
11162 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11164 if (mini_is_gsharedvt_klass (klass)) {
11165 MonoInst *offset_ins;
11167 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11168 dreg = alloc_ireg_mp (cfg);
11169 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11170 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11172 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11174 load->flags |= ins_flag;
11175 if (sp [0]->opcode != OP_LDADDR)
11176 load->flags |= MONO_INST_FAULT;
11188 context_used = mini_class_check_context_used (cfg, klass);
11190 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11193 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11194 * to be called here.
11196 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11197 mono_class_vtable (cfg->domain, klass);
11198 CHECK_TYPELOAD (klass);
11200 mono_domain_lock (cfg->domain);
11201 if (cfg->domain->special_static_fields)
11202 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11203 mono_domain_unlock (cfg->domain);
11205 is_special_static = mono_class_field_is_special_static (field);
11207 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11208 thread_ins = mono_get_thread_intrinsic (cfg);
11212 /* Generate IR to compute the field address */
11213 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11215 * Fast access to TLS data
11216 * Inline version of get_thread_static_data () in
11220 int idx, static_data_reg, array_reg, dreg;
11222 GSHAREDVT_FAILURE (op);
11224 MONO_ADD_INS (cfg->cbb, thread_ins);
11225 static_data_reg = alloc_ireg (cfg);
11226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11228 if (cfg->compile_aot) {
11229 int offset_reg, offset2_reg, idx_reg;
11231 /* For TLS variables, this will return the TLS offset */
11232 EMIT_NEW_SFLDACONST (cfg, ins, field);
11233 offset_reg = ins->dreg;
11234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11235 idx_reg = alloc_ireg (cfg);
11236 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11237 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11238 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11239 array_reg = alloc_ireg (cfg);
11240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11241 offset2_reg = alloc_ireg (cfg);
11242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11244 dreg = alloc_ireg (cfg);
11245 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11247 offset = (gsize)addr & 0x7fffffff;
11248 idx = offset & 0x3f;
11250 array_reg = alloc_ireg (cfg);
11251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11252 dreg = alloc_ireg (cfg);
11253 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11255 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11256 (cfg->compile_aot && is_special_static) ||
11257 (context_used && is_special_static)) {
11258 MonoInst *iargs [2];
11260 g_assert (field->parent);
11261 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11262 if (context_used) {
11263 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11264 field, MONO_RGCTX_INFO_CLASS_FIELD);
11266 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11268 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11269 } else if (context_used) {
11270 MonoInst *static_data;
11273 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11274 method->klass->name_space, method->klass->name, method->name,
11275 depth, field->offset);
11278 if (mono_class_needs_cctor_run (klass, method))
11279 emit_class_init (cfg, klass);
11282 * The pointer we're computing here is
11284 * super_info.static_data + field->offset
11286 static_data = emit_get_rgctx_klass (cfg, context_used,
11287 klass, MONO_RGCTX_INFO_STATIC_DATA);
11289 if (mini_is_gsharedvt_klass (klass)) {
11290 MonoInst *offset_ins;
11292 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11293 dreg = alloc_ireg_mp (cfg);
11294 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11295 } else if (field->offset == 0) {
11298 int addr_reg = mono_alloc_preg (cfg);
11299 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11301 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11302 MonoInst *iargs [2];
11304 g_assert (field->parent);
11305 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11306 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11307 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11309 MonoVTable *vtable = NULL;
11311 if (!cfg->compile_aot)
11312 vtable = mono_class_vtable (cfg->domain, klass);
11313 CHECK_TYPELOAD (klass);
11316 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11317 if (!(g_slist_find (class_inits, klass))) {
11318 emit_class_init (cfg, klass);
11319 if (cfg->verbose_level > 2)
11320 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11321 class_inits = g_slist_prepend (class_inits, klass);
11324 if (cfg->run_cctors) {
11326 /* This makes so that inline cannot trigger */
11327 /* .cctors: too many apps depend on them */
11328 /* running with a specific order... */
11330 if (! vtable->initialized)
11331 INLINE_FAILURE ("class init");
11332 ex = mono_runtime_class_init_full (vtable, FALSE);
11334 set_exception_object (cfg, ex);
11335 goto exception_exit;
11339 if (cfg->compile_aot)
11340 EMIT_NEW_SFLDACONST (cfg, ins, field);
11343 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11345 EMIT_NEW_PCONST (cfg, ins, addr);
11348 MonoInst *iargs [1];
11349 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11350 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11354 /* Generate IR to do the actual load/store operation */
11356 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11357 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11358 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11361 if (op == CEE_LDSFLDA) {
11362 ins->klass = mono_class_from_mono_type (ftype);
11363 ins->type = STACK_PTR;
11365 } else if (op == CEE_STSFLD) {
11368 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11369 store->flags |= ins_flag;
11371 gboolean is_const = FALSE;
11372 MonoVTable *vtable = NULL;
11373 gpointer addr = NULL;
11375 if (!context_used) {
11376 vtable = mono_class_vtable (cfg->domain, klass);
11377 CHECK_TYPELOAD (klass);
11379 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11380 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11381 int ro_type = ftype->type;
11383 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11384 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11385 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11388 GSHAREDVT_FAILURE (op);
11390 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11393 case MONO_TYPE_BOOLEAN:
11395 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11399 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11402 case MONO_TYPE_CHAR:
11404 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11408 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11413 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11417 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11422 case MONO_TYPE_PTR:
11423 case MONO_TYPE_FNPTR:
11424 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11425 type_to_eval_stack_type ((cfg), field->type, *sp);
11428 case MONO_TYPE_STRING:
11429 case MONO_TYPE_OBJECT:
11430 case MONO_TYPE_CLASS:
11431 case MONO_TYPE_SZARRAY:
11432 case MONO_TYPE_ARRAY:
11433 if (!mono_gc_is_moving ()) {
11434 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11435 type_to_eval_stack_type ((cfg), field->type, *sp);
11443 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11448 case MONO_TYPE_VALUETYPE:
11458 CHECK_STACK_OVF (1);
11460 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11461 load->flags |= ins_flag;
11467 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11468 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11469 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11480 token = read32 (ip + 1);
11481 klass = mini_get_class (method, token, generic_context);
11482 CHECK_TYPELOAD (klass);
11483 if (ins_flag & MONO_INST_VOLATILE) {
11484 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11485 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11487 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11488 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11489 ins->flags |= ins_flag;
11490 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11491 generic_class_is_reference_type (cfg, klass)) {
11492 /* insert call to write barrier */
11493 emit_write_barrier (cfg, sp [0], sp [1]);
11505 const char *data_ptr;
11507 guint32 field_token;
11513 token = read32 (ip + 1);
11515 klass = mini_get_class (method, token, generic_context);
11516 CHECK_TYPELOAD (klass);
11518 context_used = mini_class_check_context_used (cfg, klass);
11520 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11521 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11522 ins->sreg1 = sp [0]->dreg;
11523 ins->type = STACK_I4;
11524 ins->dreg = alloc_ireg (cfg);
11525 MONO_ADD_INS (cfg->cbb, ins);
11526 *sp = mono_decompose_opcode (cfg, ins);
11529 if (context_used) {
11530 MonoInst *args [3];
11531 MonoClass *array_class = mono_array_class_get (klass, 1);
11532 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11534 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11537 args [0] = emit_get_rgctx_klass (cfg, context_used,
11538 array_class, MONO_RGCTX_INFO_VTABLE);
11543 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11545 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11547 if (cfg->opt & MONO_OPT_SHARED) {
11548 /* Decompose now to avoid problems with references to the domainvar */
11549 MonoInst *iargs [3];
11551 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11552 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11553 iargs [2] = sp [0];
11555 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11557 /* Decompose later since it is needed by abcrem */
11558 MonoClass *array_type = mono_array_class_get (klass, 1);
11559 mono_class_vtable (cfg->domain, array_type);
11560 CHECK_TYPELOAD (array_type);
11562 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11563 ins->dreg = alloc_ireg_ref (cfg);
11564 ins->sreg1 = sp [0]->dreg;
11565 ins->inst_newa_class = klass;
11566 ins->type = STACK_OBJ;
11567 ins->klass = array_type;
11568 MONO_ADD_INS (cfg->cbb, ins);
11569 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11570 cfg->cbb->has_array_access = TRUE;
11572 /* Needed so mono_emit_load_get_addr () gets called */
11573 mono_get_got_var (cfg);
11583 * we inline/optimize the initialization sequence if possible.
11584 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11585 * for small sizes open code the memcpy
11586 * ensure the rva field is big enough
11588 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11589 MonoMethod *memcpy_method = get_memcpy_method ();
11590 MonoInst *iargs [3];
11591 int add_reg = alloc_ireg_mp (cfg);
11593 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11594 if (cfg->compile_aot) {
11595 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11597 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11599 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11600 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11609 if (sp [0]->type != STACK_OBJ)
11612 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11613 ins->dreg = alloc_preg (cfg);
11614 ins->sreg1 = sp [0]->dreg;
11615 ins->type = STACK_I4;
11616 /* This flag will be inherited by the decomposition */
11617 ins->flags |= MONO_INST_FAULT;
11618 MONO_ADD_INS (cfg->cbb, ins);
11619 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11620 cfg->cbb->has_array_access = TRUE;
11628 if (sp [0]->type != STACK_OBJ)
11631 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11633 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11634 CHECK_TYPELOAD (klass);
11635 /* we need to make sure that this array is exactly the type it needs
11636 * to be for correctness. the wrappers are lax with their usage
11637 * so we need to ignore them here
11639 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11640 MonoClass *array_class = mono_array_class_get (klass, 1);
11641 mini_emit_check_array_type (cfg, sp [0], array_class);
11642 CHECK_TYPELOAD (array_class);
11646 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11651 case CEE_LDELEM_I1:
11652 case CEE_LDELEM_U1:
11653 case CEE_LDELEM_I2:
11654 case CEE_LDELEM_U2:
11655 case CEE_LDELEM_I4:
11656 case CEE_LDELEM_U4:
11657 case CEE_LDELEM_I8:
11659 case CEE_LDELEM_R4:
11660 case CEE_LDELEM_R8:
11661 case CEE_LDELEM_REF: {
11667 if (*ip == CEE_LDELEM) {
11669 token = read32 (ip + 1);
11670 klass = mini_get_class (method, token, generic_context);
11671 CHECK_TYPELOAD (klass);
11672 mono_class_init (klass);
11675 klass = array_access_to_klass (*ip);
11677 if (sp [0]->type != STACK_OBJ)
11680 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11682 if (mini_is_gsharedvt_variable_klass (klass)) {
11683 // FIXME-VT: OP_ICONST optimization
11684 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11685 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11686 ins->opcode = OP_LOADV_MEMBASE;
11687 } else if (sp [1]->opcode == OP_ICONST) {
11688 int array_reg = sp [0]->dreg;
11689 int index_reg = sp [1]->dreg;
11690 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11692 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11693 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11695 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11696 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11699 if (*ip == CEE_LDELEM)
11706 case CEE_STELEM_I1:
11707 case CEE_STELEM_I2:
11708 case CEE_STELEM_I4:
11709 case CEE_STELEM_I8:
11710 case CEE_STELEM_R4:
11711 case CEE_STELEM_R8:
11712 case CEE_STELEM_REF:
11717 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11719 if (*ip == CEE_STELEM) {
11721 token = read32 (ip + 1);
11722 klass = mini_get_class (method, token, generic_context);
11723 CHECK_TYPELOAD (klass);
11724 mono_class_init (klass);
11727 klass = array_access_to_klass (*ip);
11729 if (sp [0]->type != STACK_OBJ)
11732 emit_array_store (cfg, klass, sp, TRUE);
11734 if (*ip == CEE_STELEM)
11741 case CEE_CKFINITE: {
11745 if (cfg->llvm_only) {
11746 MonoInst *iargs [1];
11748 iargs [0] = sp [0];
11749 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11751 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11752 ins->sreg1 = sp [0]->dreg;
11753 ins->dreg = alloc_freg (cfg);
11754 ins->type = STACK_R8;
11755 MONO_ADD_INS (cfg->cbb, ins);
11757 *sp++ = mono_decompose_opcode (cfg, ins);
11763 case CEE_REFANYVAL: {
11764 MonoInst *src_var, *src;
11766 int klass_reg = alloc_preg (cfg);
11767 int dreg = alloc_preg (cfg);
11769 GSHAREDVT_FAILURE (*ip);
11772 MONO_INST_NEW (cfg, ins, *ip);
11775 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11776 CHECK_TYPELOAD (klass);
11778 context_used = mini_class_check_context_used (cfg, klass);
11781 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11783 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11784 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11787 if (context_used) {
11788 MonoInst *klass_ins;
11790 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11791 klass, MONO_RGCTX_INFO_KLASS);
11794 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11795 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11797 mini_emit_class_check (cfg, klass_reg, klass);
11799 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11800 ins->type = STACK_MP;
11801 ins->klass = klass;
11806 case CEE_MKREFANY: {
11807 MonoInst *loc, *addr;
11809 GSHAREDVT_FAILURE (*ip);
11812 MONO_INST_NEW (cfg, ins, *ip);
11815 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11816 CHECK_TYPELOAD (klass);
11818 context_used = mini_class_check_context_used (cfg, klass);
11820 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11821 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11823 if (context_used) {
11824 MonoInst *const_ins;
11825 int type_reg = alloc_preg (cfg);
11827 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11831 } else if (cfg->compile_aot) {
11832 int const_reg = alloc_preg (cfg);
11833 int type_reg = alloc_preg (cfg);
11835 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11836 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11840 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11841 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11845 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11846 ins->type = STACK_VTYPE;
11847 ins->klass = mono_defaults.typed_reference_class;
11852 case CEE_LDTOKEN: {
11854 MonoClass *handle_class;
11856 CHECK_STACK_OVF (1);
11859 n = read32 (ip + 1);
11861 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11862 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11863 handle = mono_method_get_wrapper_data (method, n);
11864 handle_class = mono_method_get_wrapper_data (method, n + 1);
11865 if (handle_class == mono_defaults.typehandle_class)
11866 handle = &((MonoClass*)handle)->byval_arg;
11869 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11874 mono_class_init (handle_class);
11875 if (cfg->gshared) {
11876 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11877 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11878 /* This case handles ldtoken
11879 of an open type, like for
11882 } else if (handle_class == mono_defaults.typehandle_class) {
11883 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11884 } else if (handle_class == mono_defaults.fieldhandle_class)
11885 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11886 else if (handle_class == mono_defaults.methodhandle_class)
11887 context_used = mini_method_check_context_used (cfg, handle);
11889 g_assert_not_reached ();
11892 if ((cfg->opt & MONO_OPT_SHARED) &&
11893 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11894 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11895 MonoInst *addr, *vtvar, *iargs [3];
11896 int method_context_used;
11898 method_context_used = mini_method_check_context_used (cfg, method);
11900 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11902 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11903 EMIT_NEW_ICONST (cfg, iargs [1], n);
11904 if (method_context_used) {
11905 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11906 method, MONO_RGCTX_INFO_METHOD);
11907 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11909 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11910 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11912 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11916 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11918 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11919 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11920 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11921 (cmethod->klass == mono_defaults.systemtype_class) &&
11922 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11923 MonoClass *tclass = mono_class_from_mono_type (handle);
11925 mono_class_init (tclass);
11926 if (context_used) {
11927 ins = emit_get_rgctx_klass (cfg, context_used,
11928 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11929 } else if (cfg->compile_aot) {
11930 if (method->wrapper_type) {
11931 mono_error_init (&error); //got to do it since there are multiple conditionals below
11932 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11933 /* Special case for static synchronized wrappers */
11934 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11936 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11937 /* FIXME: n is not a normal token */
11939 EMIT_NEW_PCONST (cfg, ins, NULL);
11942 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11945 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11947 ins->type = STACK_OBJ;
11948 ins->klass = cmethod->klass;
11951 MonoInst *addr, *vtvar;
11953 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11955 if (context_used) {
11956 if (handle_class == mono_defaults.typehandle_class) {
11957 ins = emit_get_rgctx_klass (cfg, context_used,
11958 mono_class_from_mono_type (handle),
11959 MONO_RGCTX_INFO_TYPE);
11960 } else if (handle_class == mono_defaults.methodhandle_class) {
11961 ins = emit_get_rgctx_method (cfg, context_used,
11962 handle, MONO_RGCTX_INFO_METHOD);
11963 } else if (handle_class == mono_defaults.fieldhandle_class) {
11964 ins = emit_get_rgctx_field (cfg, context_used,
11965 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11967 g_assert_not_reached ();
11969 } else if (cfg->compile_aot) {
11970 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11972 EMIT_NEW_PCONST (cfg, ins, handle);
11974 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11976 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11986 MONO_INST_NEW (cfg, ins, OP_THROW);
11988 ins->sreg1 = sp [0]->dreg;
11990 cfg->cbb->out_of_line = TRUE;
11991 MONO_ADD_INS (cfg->cbb, ins);
11992 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11993 MONO_ADD_INS (cfg->cbb, ins);
11996 link_bblock (cfg, cfg->cbb, end_bblock);
11997 start_new_bblock = 1;
11998 /* This can complicate code generation for llvm since the return value might not be defined */
11999 if (COMPILE_LLVM (cfg))
12000 INLINE_FAILURE ("throw");
12002 case CEE_ENDFINALLY:
12003 /* mono_save_seq_point_info () depends on this */
12004 if (sp != stack_start)
12005 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12006 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12007 MONO_ADD_INS (cfg->cbb, ins);
12009 start_new_bblock = 1;
12012 * Control will leave the method so empty the stack, otherwise
12013 * the next basic block will start with a nonempty stack.
12015 while (sp != stack_start) {
12020 case CEE_LEAVE_S: {
12023 if (*ip == CEE_LEAVE) {
12025 target = ip + 5 + (gint32)read32(ip + 1);
12028 target = ip + 2 + (signed char)(ip [1]);
12031 /* empty the stack */
12032 while (sp != stack_start) {
12037 * If this leave statement is in a catch block, check for a
12038 * pending exception, and rethrow it if necessary.
12039 * We avoid doing this in runtime invoke wrappers, since those are called
12040 * by native code which expects the wrapper to catch all exceptions.
12042 for (i = 0; i < header->num_clauses; ++i) {
12043 MonoExceptionClause *clause = &header->clauses [i];
12046 * Use <= in the final comparison to handle clauses with multiple
12047 * leave statements, like in bug #78024.
12048 * The ordering of the exception clauses guarantees that we find the
12049 * innermost clause.
12051 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12053 MonoBasicBlock *dont_throw;
12058 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12061 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12063 NEW_BBLOCK (cfg, dont_throw);
12066 * Currently, we always rethrow the abort exception, despite the
12067 * fact that this is not correct. See thread6.cs for an example.
12068 * But propagating the abort exception is more important than
12069 * getting the semantics right.
12071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12073 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12075 MONO_START_BB (cfg, dont_throw);
12080 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12083 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12085 MonoExceptionClause *clause;
12087 for (tmp = handlers; tmp; tmp = tmp->next) {
12088 clause = tmp->data;
12089 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12091 link_bblock (cfg, cfg->cbb, tblock);
12092 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12093 ins->inst_target_bb = tblock;
12094 ins->inst_eh_block = clause;
12095 MONO_ADD_INS (cfg->cbb, ins);
12096 cfg->cbb->has_call_handler = 1;
12097 if (COMPILE_LLVM (cfg)) {
12098 MonoBasicBlock *target_bb;
12101 * Link the finally bblock with the target, since it will
12102 * conceptually branch there.
12103 * FIXME: Have to link the bblock containing the endfinally.
12105 GET_BBLOCK (cfg, target_bb, target);
12106 link_bblock (cfg, tblock, target_bb);
12109 g_list_free (handlers);
12112 MONO_INST_NEW (cfg, ins, OP_BR);
12113 MONO_ADD_INS (cfg->cbb, ins);
12114 GET_BBLOCK (cfg, tblock, target);
12115 link_bblock (cfg, cfg->cbb, tblock);
12116 ins->inst_target_bb = tblock;
12118 start_new_bblock = 1;
12120 if (*ip == CEE_LEAVE)
12129 * Mono specific opcodes
12131 case MONO_CUSTOM_PREFIX: {
12133 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12137 case CEE_MONO_ICALL: {
12139 MonoJitICallInfo *info;
12141 token = read32 (ip + 2);
12142 func = mono_method_get_wrapper_data (method, token);
12143 info = mono_find_jit_icall_by_addr (func);
12145 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12148 CHECK_STACK (info->sig->param_count);
12149 sp -= info->sig->param_count;
12151 ins = mono_emit_jit_icall (cfg, info->func, sp);
12152 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12156 inline_costs += 10 * num_calls++;
12160 case CEE_MONO_LDPTR_CARD_TABLE:
12161 case CEE_MONO_LDPTR_NURSERY_START:
12162 case CEE_MONO_LDPTR_NURSERY_BITS:
12163 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12164 CHECK_STACK_OVF (1);
12167 case CEE_MONO_LDPTR_CARD_TABLE:
12168 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
12170 case CEE_MONO_LDPTR_NURSERY_START:
12171 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_START);
12173 case CEE_MONO_LDPTR_NURSERY_BITS:
12174 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_BITS);
12176 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12177 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG);
12183 inline_costs += 10 * num_calls++;
12186 case CEE_MONO_LDPTR: {
12189 CHECK_STACK_OVF (1);
12191 token = read32 (ip + 2);
12193 ptr = mono_method_get_wrapper_data (method, token);
12194 EMIT_NEW_PCONST (cfg, ins, ptr);
12197 inline_costs += 10 * num_calls++;
12198 /* Can't embed random pointers into AOT code */
12202 case CEE_MONO_JIT_ICALL_ADDR: {
12203 MonoJitICallInfo *callinfo;
12206 CHECK_STACK_OVF (1);
12208 token = read32 (ip + 2);
12210 ptr = mono_method_get_wrapper_data (method, token);
12211 callinfo = mono_find_jit_icall_by_addr (ptr);
12212 g_assert (callinfo);
12213 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12216 inline_costs += 10 * num_calls++;
12219 case CEE_MONO_ICALL_ADDR: {
12220 MonoMethod *cmethod;
12223 CHECK_STACK_OVF (1);
12225 token = read32 (ip + 2);
12227 cmethod = mono_method_get_wrapper_data (method, token);
12229 if (cfg->compile_aot) {
12230 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12232 ptr = mono_lookup_internal_call (cmethod);
12234 EMIT_NEW_PCONST (cfg, ins, ptr);
12240 case CEE_MONO_VTADDR: {
12241 MonoInst *src_var, *src;
12247 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12248 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12253 case CEE_MONO_NEWOBJ: {
12254 MonoInst *iargs [2];
12256 CHECK_STACK_OVF (1);
12258 token = read32 (ip + 2);
12259 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12260 mono_class_init (klass);
12261 NEW_DOMAINCONST (cfg, iargs [0]);
12262 MONO_ADD_INS (cfg->cbb, iargs [0]);
12263 NEW_CLASSCONST (cfg, iargs [1], klass);
12264 MONO_ADD_INS (cfg->cbb, iargs [1]);
12265 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12267 inline_costs += 10 * num_calls++;
12270 case CEE_MONO_OBJADDR:
12273 MONO_INST_NEW (cfg, ins, OP_MOVE);
12274 ins->dreg = alloc_ireg_mp (cfg);
12275 ins->sreg1 = sp [0]->dreg;
12276 ins->type = STACK_MP;
12277 MONO_ADD_INS (cfg->cbb, ins);
12281 case CEE_MONO_LDNATIVEOBJ:
12283 * Similar to LDOBJ, but instead load the unmanaged
12284 * representation of the vtype to the stack.
12289 token = read32 (ip + 2);
12290 klass = mono_method_get_wrapper_data (method, token);
12291 g_assert (klass->valuetype);
12292 mono_class_init (klass);
12295 MonoInst *src, *dest, *temp;
12298 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12299 temp->backend.is_pinvoke = 1;
12300 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12301 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12303 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12304 dest->type = STACK_VTYPE;
12305 dest->klass = klass;
12311 case CEE_MONO_RETOBJ: {
12313 * Same as RET, but return the native representation of a vtype
12316 g_assert (cfg->ret);
12317 g_assert (mono_method_signature (method)->pinvoke);
12322 token = read32 (ip + 2);
12323 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12325 if (!cfg->vret_addr) {
12326 g_assert (cfg->ret_var_is_local);
12328 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12330 EMIT_NEW_RETLOADA (cfg, ins);
12332 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12334 if (sp != stack_start)
12337 MONO_INST_NEW (cfg, ins, OP_BR);
12338 ins->inst_target_bb = end_bblock;
12339 MONO_ADD_INS (cfg->cbb, ins);
12340 link_bblock (cfg, cfg->cbb, end_bblock);
12341 start_new_bblock = 1;
12345 case CEE_MONO_CISINST:
12346 case CEE_MONO_CCASTCLASS: {
12351 token = read32 (ip + 2);
12352 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12353 if (ip [1] == CEE_MONO_CISINST)
12354 ins = handle_cisinst (cfg, klass, sp [0]);
12356 ins = handle_ccastclass (cfg, klass, sp [0]);
12361 case CEE_MONO_SAVE_LMF:
12362 case CEE_MONO_RESTORE_LMF:
12365 case CEE_MONO_CLASSCONST:
12366 CHECK_STACK_OVF (1);
12368 token = read32 (ip + 2);
12369 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12372 inline_costs += 10 * num_calls++;
12374 case CEE_MONO_NOT_TAKEN:
12375 cfg->cbb->out_of_line = TRUE;
12378 case CEE_MONO_TLS: {
12381 CHECK_STACK_OVF (1);
12383 key = (gint32)read32 (ip + 2);
12384 g_assert (key < TLS_KEY_NUM);
12386 ins = mono_create_tls_get (cfg, key);
12388 if (cfg->compile_aot) {
12390 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12391 ins->dreg = alloc_preg (cfg);
12392 ins->type = STACK_PTR;
12394 g_assert_not_reached ();
12397 ins->type = STACK_PTR;
12398 MONO_ADD_INS (cfg->cbb, ins);
12403 case CEE_MONO_DYN_CALL: {
12404 MonoCallInst *call;
12406 /* It would be easier to call a trampoline, but that would put an
12407 * extra frame on the stack, confusing exception handling. So
12408 * implement it inline using an opcode for now.
12411 if (!cfg->dyn_call_var) {
12412 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12413 /* prevent it from being register allocated */
12414 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12417 /* Has to use a call inst since the local regalloc expects it */
12418 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12419 ins = (MonoInst*)call;
12421 ins->sreg1 = sp [0]->dreg;
12422 ins->sreg2 = sp [1]->dreg;
12423 MONO_ADD_INS (cfg->cbb, ins);
12425 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12428 inline_costs += 10 * num_calls++;
12432 case CEE_MONO_MEMORY_BARRIER: {
12434 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12438 case CEE_MONO_JIT_ATTACH: {
12439 MonoInst *args [16], *domain_ins;
12440 MonoInst *ad_ins, *jit_tls_ins;
12441 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12443 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12445 EMIT_NEW_PCONST (cfg, ins, NULL);
12446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12448 ad_ins = mono_get_domain_intrinsic (cfg);
12449 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12451 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12452 NEW_BBLOCK (cfg, next_bb);
12453 NEW_BBLOCK (cfg, call_bb);
12455 if (cfg->compile_aot) {
12456 /* AOT code is only used in the root domain */
12457 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12459 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12461 MONO_ADD_INS (cfg->cbb, ad_ins);
12462 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12463 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12465 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12469 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12470 MONO_START_BB (cfg, call_bb);
12473 if (cfg->compile_aot) {
12474 /* AOT code is only used in the root domain */
12475 EMIT_NEW_PCONST (cfg, args [0], NULL);
12477 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12479 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12480 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12483 MONO_START_BB (cfg, next_bb);
12487 case CEE_MONO_JIT_DETACH: {
12488 MonoInst *args [16];
12490 /* Restore the original domain */
12491 dreg = alloc_ireg (cfg);
12492 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12493 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12498 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12504 case CEE_PREFIX1: {
12507 case CEE_ARGLIST: {
12508 /* somewhat similar to LDTOKEN */
12509 MonoInst *addr, *vtvar;
12510 CHECK_STACK_OVF (1);
12511 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12513 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12514 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12516 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12517 ins->type = STACK_VTYPE;
12518 ins->klass = mono_defaults.argumenthandle_class;
12528 MonoInst *cmp, *arg1, *arg2;
12536 * The following transforms:
12537 * CEE_CEQ into OP_CEQ
12538 * CEE_CGT into OP_CGT
12539 * CEE_CGT_UN into OP_CGT_UN
12540 * CEE_CLT into OP_CLT
12541 * CEE_CLT_UN into OP_CLT_UN
12543 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12545 MONO_INST_NEW (cfg, ins, cmp->opcode);
12546 cmp->sreg1 = arg1->dreg;
12547 cmp->sreg2 = arg2->dreg;
12548 type_from_op (cfg, cmp, arg1, arg2);
12550 add_widen_op (cfg, cmp, &arg1, &arg2);
12551 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12552 cmp->opcode = OP_LCOMPARE;
12553 else if (arg1->type == STACK_R4)
12554 cmp->opcode = OP_RCOMPARE;
12555 else if (arg1->type == STACK_R8)
12556 cmp->opcode = OP_FCOMPARE;
12558 cmp->opcode = OP_ICOMPARE;
12559 MONO_ADD_INS (cfg->cbb, cmp);
12560 ins->type = STACK_I4;
12561 ins->dreg = alloc_dreg (cfg, ins->type);
12562 type_from_op (cfg, ins, arg1, arg2);
12564 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12566 * The backends expect the fceq opcodes to do the
12569 ins->sreg1 = cmp->sreg1;
12570 ins->sreg2 = cmp->sreg2;
12573 MONO_ADD_INS (cfg->cbb, ins);
12579 MonoInst *argconst;
12580 MonoMethod *cil_method;
12582 CHECK_STACK_OVF (1);
12584 n = read32 (ip + 2);
12585 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12586 if (!cmethod || mono_loader_get_last_error ())
12588 mono_class_init (cmethod->klass);
12590 mono_save_token_info (cfg, image, n, cmethod);
12592 context_used = mini_method_check_context_used (cfg, cmethod);
12594 cil_method = cmethod;
12595 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12596 METHOD_ACCESS_FAILURE (method, cil_method);
12598 if (mono_security_core_clr_enabled ())
12599 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12602 * Optimize the common case of ldftn+delegate creation
12604 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12605 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12606 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12607 MonoInst *target_ins, *handle_ins;
12608 MonoMethod *invoke;
12609 int invoke_context_used;
12611 invoke = mono_get_delegate_invoke (ctor_method->klass);
12612 if (!invoke || !mono_method_signature (invoke))
12615 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12617 target_ins = sp [-1];
12619 if (mono_security_core_clr_enabled ())
12620 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12622 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12623 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12624 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12626 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12630 /* FIXME: SGEN support */
12631 if (invoke_context_used == 0) {
12633 if (cfg->verbose_level > 3)
12634 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12635 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12638 CHECK_CFG_EXCEPTION;
12648 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12649 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12653 inline_costs += 10 * num_calls++;
12656 case CEE_LDVIRTFTN: {
12657 MonoInst *args [2];
12661 n = read32 (ip + 2);
12662 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12663 if (!cmethod || mono_loader_get_last_error ())
12665 mono_class_init (cmethod->klass);
12667 context_used = mini_method_check_context_used (cfg, cmethod);
12669 if (mono_security_core_clr_enabled ())
12670 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12673 * Optimize the common case of ldvirtftn+delegate creation
12675 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12676 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12677 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12678 MonoInst *target_ins, *handle_ins;
12679 MonoMethod *invoke;
12680 int invoke_context_used;
12681 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12683 invoke = mono_get_delegate_invoke (ctor_method->klass);
12684 if (!invoke || !mono_method_signature (invoke))
12687 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12689 target_ins = sp [-1];
12691 if (mono_security_core_clr_enabled ())
12692 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12694 /* FIXME: SGEN support */
12695 if (invoke_context_used == 0 || cfg->llvm_only) {
12697 if (cfg->verbose_level > 3)
12698 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12699 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12702 CHECK_CFG_EXCEPTION;
12715 args [1] = emit_get_rgctx_method (cfg, context_used,
12716 cmethod, MONO_RGCTX_INFO_METHOD);
12719 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12721 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12724 inline_costs += 10 * num_calls++;
12728 CHECK_STACK_OVF (1);
12730 n = read16 (ip + 2);
12732 EMIT_NEW_ARGLOAD (cfg, ins, n);
12737 CHECK_STACK_OVF (1);
12739 n = read16 (ip + 2);
12741 NEW_ARGLOADA (cfg, ins, n);
12742 MONO_ADD_INS (cfg->cbb, ins);
12750 n = read16 (ip + 2);
12752 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12754 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12758 CHECK_STACK_OVF (1);
12760 n = read16 (ip + 2);
12762 EMIT_NEW_LOCLOAD (cfg, ins, n);
12767 unsigned char *tmp_ip;
12768 CHECK_STACK_OVF (1);
12770 n = read16 (ip + 2);
12773 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12779 EMIT_NEW_LOCLOADA (cfg, ins, n);
12788 n = read16 (ip + 2);
12790 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12792 emit_stloc_ir (cfg, sp, header, n);
12799 if (sp != stack_start)
12801 if (cfg->method != method)
12803 * Inlining this into a loop in a parent could lead to
12804 * stack overflows which is different behavior than the
12805 * non-inlined case, thus disable inlining in this case.
12807 INLINE_FAILURE("localloc");
12809 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12810 ins->dreg = alloc_preg (cfg);
12811 ins->sreg1 = sp [0]->dreg;
12812 ins->type = STACK_PTR;
12813 MONO_ADD_INS (cfg->cbb, ins);
12815 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12817 ins->flags |= MONO_INST_INIT;
12822 case CEE_ENDFILTER: {
12823 MonoExceptionClause *clause, *nearest;
12828 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12830 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12831 ins->sreg1 = (*sp)->dreg;
12832 MONO_ADD_INS (cfg->cbb, ins);
12833 start_new_bblock = 1;
12837 for (cc = 0; cc < header->num_clauses; ++cc) {
12838 clause = &header->clauses [cc];
12839 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12840 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12841 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12844 g_assert (nearest);
12845 if ((ip - header->code) != nearest->handler_offset)
12850 case CEE_UNALIGNED_:
12851 ins_flag |= MONO_INST_UNALIGNED;
12852 /* FIXME: record alignment? we can assume 1 for now */
12856 case CEE_VOLATILE_:
12857 ins_flag |= MONO_INST_VOLATILE;
12861 ins_flag |= MONO_INST_TAILCALL;
12862 cfg->flags |= MONO_CFG_HAS_TAIL;
12863 /* Can't inline tail calls at this time */
12864 inline_costs += 100000;
12871 token = read32 (ip + 2);
12872 klass = mini_get_class (method, token, generic_context);
12873 CHECK_TYPELOAD (klass);
12874 if (generic_class_is_reference_type (cfg, klass))
12875 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12877 mini_emit_initobj (cfg, *sp, NULL, klass);
12881 case CEE_CONSTRAINED_:
12883 token = read32 (ip + 2);
12884 constrained_class = mini_get_class (method, token, generic_context);
12885 CHECK_TYPELOAD (constrained_class);
12889 case CEE_INITBLK: {
12890 MonoInst *iargs [3];
12894 /* Skip optimized paths for volatile operations. */
12895 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12896 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12897 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12898 /* emit_memset only works when val == 0 */
12899 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12902 iargs [0] = sp [0];
12903 iargs [1] = sp [1];
12904 iargs [2] = sp [2];
12905 if (ip [1] == CEE_CPBLK) {
12907 * FIXME: It's unclear whether we should be emitting both the acquire
12908 * and release barriers for cpblk. It is technically both a load and
12909 * store operation, so it seems like that's the sensible thing to do.
12911 * FIXME: We emit full barriers on both sides of the operation for
12912 * simplicity. We should have a separate atomic memcpy method instead.
12914 MonoMethod *memcpy_method = get_memcpy_method ();
12916 if (ins_flag & MONO_INST_VOLATILE)
12917 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12919 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12920 call->flags |= ins_flag;
12922 if (ins_flag & MONO_INST_VOLATILE)
12923 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12925 MonoMethod *memset_method = get_memset_method ();
12926 if (ins_flag & MONO_INST_VOLATILE) {
12927 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12928 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12930 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12931 call->flags |= ins_flag;
12942 ins_flag |= MONO_INST_NOTYPECHECK;
12944 ins_flag |= MONO_INST_NORANGECHECK;
12945 /* we ignore the no-nullcheck for now since we
12946 * really do it explicitly only when doing callvirt->call
12950 case CEE_RETHROW: {
12952 int handler_offset = -1;
12954 for (i = 0; i < header->num_clauses; ++i) {
12955 MonoExceptionClause *clause = &header->clauses [i];
12956 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12957 handler_offset = clause->handler_offset;
12962 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12964 if (handler_offset == -1)
12967 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12968 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12969 ins->sreg1 = load->dreg;
12970 MONO_ADD_INS (cfg->cbb, ins);
12972 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12973 MONO_ADD_INS (cfg->cbb, ins);
12976 link_bblock (cfg, cfg->cbb, end_bblock);
12977 start_new_bblock = 1;
12985 CHECK_STACK_OVF (1);
12987 token = read32 (ip + 2);
12988 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12989 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12992 val = mono_type_size (type, &ialign);
12994 MonoClass *klass = mini_get_class (method, token, generic_context);
12995 CHECK_TYPELOAD (klass);
12997 val = mono_type_size (&klass->byval_arg, &ialign);
12999 if (mini_is_gsharedvt_klass (klass))
13000 GSHAREDVT_FAILURE (*ip);
13002 EMIT_NEW_ICONST (cfg, ins, val);
13007 case CEE_REFANYTYPE: {
13008 MonoInst *src_var, *src;
13010 GSHAREDVT_FAILURE (*ip);
13016 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13018 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13019 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13025 case CEE_READONLY_:
13038 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13048 g_warning ("opcode 0x%02x not handled", *ip);
13052 if (start_new_bblock != 1)
13055 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13056 if (cfg->cbb->next_bb) {
13057 /* This could already be set because of inlining, #693905 */
13058 MonoBasicBlock *bb = cfg->cbb;
13060 while (bb->next_bb)
13062 bb->next_bb = end_bblock;
13064 cfg->cbb->next_bb = end_bblock;
13067 if (cfg->method == method && cfg->domainvar) {
13069 MonoInst *get_domain;
13071 cfg->cbb = init_localsbb;
13073 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13074 MONO_ADD_INS (cfg->cbb, get_domain);
13076 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13078 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13079 MONO_ADD_INS (cfg->cbb, store);
13082 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13083 if (cfg->compile_aot)
13084 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13085 mono_get_got_var (cfg);
13088 if (cfg->method == method && cfg->got_var)
13089 mono_emit_load_got_addr (cfg);
13091 if (init_localsbb) {
13092 cfg->cbb = init_localsbb;
13094 for (i = 0; i < header->num_locals; ++i) {
13095 emit_init_local (cfg, i, header->locals [i], init_locals);
13099 if (cfg->init_ref_vars && cfg->method == method) {
13100 /* Emit initialization for ref vars */
13101 // FIXME: Avoid duplication initialization for IL locals.
13102 for (i = 0; i < cfg->num_varinfo; ++i) {
13103 MonoInst *ins = cfg->varinfo [i];
13105 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13106 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13110 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13111 cfg->cbb = init_localsbb;
13112 emit_push_lmf (cfg);
13115 cfg->cbb = init_localsbb;
13116 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13119 MonoBasicBlock *bb;
13122 * Make seq points at backward branch targets interruptable.
13124 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13125 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13126 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13129 /* Add a sequence point for method entry/exit events */
13130 if (seq_points && cfg->gen_sdb_seq_points) {
13131 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13132 MONO_ADD_INS (init_localsbb, ins);
13133 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13134 MONO_ADD_INS (cfg->bb_exit, ins);
13138 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13139 * the code they refer to was dead (#11880).
13141 if (sym_seq_points) {
13142 for (i = 0; i < header->code_size; ++i) {
13143 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13146 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13147 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13154 if (cfg->method == method) {
13155 MonoBasicBlock *bb;
13156 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13157 bb->region = mono_find_block_region (cfg, bb->real_offset);
13159 mono_create_spvar_for_region (cfg, bb->region);
13160 if (cfg->verbose_level > 2)
13161 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13165 if (inline_costs < 0) {
13168 /* Method is too large */
13169 mname = mono_method_full_name (method, TRUE);
13170 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13171 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13175 if ((cfg->verbose_level > 2) && (cfg->method == method))
13176 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13181 g_assert (!mono_error_ok (&cfg->error));
13185 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13189 set_exception_type_from_invalid_il (cfg, method, ip);
13193 g_slist_free (class_inits);
13194 mono_basic_block_free (original_bb);
13195 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13196 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13197 if (cfg->exception_type)
13200 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode to its store-immediate
 * counterpart (used when the stored value turns out to be a constant).
 * Any opcode without an immediate form hits the assert below.
 * NOTE(review): structural lines (return type, switch header, closing
 * braces) are elided in this view of the file.
 */
13204 store_membase_reg_to_store_membase_imm (int opcode)
13207 case OP_STORE_MEMBASE_REG:
13208 return OP_STORE_MEMBASE_IMM;
13209 case OP_STOREI1_MEMBASE_REG:
13210 return OP_STOREI1_MEMBASE_IMM;
13211 case OP_STOREI2_MEMBASE_REG:
13212 return OP_STOREI2_MEMBASE_IMM;
13213 case OP_STOREI4_MEMBASE_REG:
13214 return OP_STOREI4_MEMBASE_IMM;
13215 case OP_STOREI8_MEMBASE_REG:
13216 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for this store opcode */
13218 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the variant that takes
 * an immediate second operand (OP_IADD -> OP_IADD_IMM etc. — the case
 * labels are elided in this view; the mapping is evident from the returned
 * *_IMM opcodes). Integer (I*), long (L*), compare and store_membase forms
 * are handled, plus x86/amd64-specific push/compare-membase forms behind
 * the target ifdefs.
 */
13225 mono_op_to_op_imm (int opcode)
13229 return OP_IADD_IMM;
13231 return OP_ISUB_IMM;
13233 return OP_IDIV_IMM;
13235 return OP_IDIV_UN_IMM;
13237 return OP_IREM_IMM;
13239 return OP_IREM_UN_IMM;
13241 return OP_IMUL_IMM;
13243 return OP_IAND_IMM;
13247 return OP_IXOR_IMM;
13249 return OP_ISHL_IMM;
13251 return OP_ISHR_IMM;
13253 return OP_ISHR_UN_IMM;
13256 return OP_LADD_IMM;
13258 return OP_LSUB_IMM;
13260 return OP_LAND_IMM;
13264 return OP_LXOR_IMM;
13266 return OP_LSHL_IMM;
13268 return OP_LSHR_IMM;
13270 return OP_LSHR_UN_IMM;
/* long rem-with-immediate only exists natively on 64 bit registers */
13271 #if SIZEOF_REGISTER == 8
13273 return OP_LREM_IMM;
13277 return OP_COMPARE_IMM;
13279 return OP_ICOMPARE_IMM;
13281 return OP_LCOMPARE_IMM;
13283 case OP_STORE_MEMBASE_REG:
13284 return OP_STORE_MEMBASE_IMM;
13285 case OP_STOREI1_MEMBASE_REG:
13286 return OP_STOREI1_MEMBASE_IMM;
13287 case OP_STOREI2_MEMBASE_REG:
13288 return OP_STOREI2_MEMBASE_IMM;
13289 case OP_STOREI4_MEMBASE_REG:
13290 return OP_STOREI4_MEMBASE_IMM;
/* target-specific immediate forms */
13292 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13294 return OP_X86_PUSH_IMM;
13295 case OP_X86_COMPARE_MEMBASE_REG:
13296 return OP_X86_COMPARE_MEMBASE_IMM;
13298 #if defined(TARGET_AMD64)
13299 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13300 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13302 case OP_VOIDCALL_REG:
13303 return OP_VOIDCALL;
13311 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL CEE_LDIND_* indirect-load opcode into the JIT's
 * typed OP_LOAD*_MEMBASE opcode (the CEE_* case labels other than
 * CEE_LDIND_REF are elided in this view). Unknown opcodes assert.
 */
13318 ldind_to_load_membase (int opcode)
13322 return OP_LOADI1_MEMBASE;
13324 return OP_LOADU1_MEMBASE;
13326 return OP_LOADI2_MEMBASE;
13328 return OP_LOADU2_MEMBASE;
13330 return OP_LOADI4_MEMBASE;
13332 return OP_LOADU4_MEMBASE;
13334 return OP_LOAD_MEMBASE;
/* object references use the pointer-sized load */
13335 case CEE_LDIND_REF:
13336 return OP_LOAD_MEMBASE;
13338 return OP_LOADI8_MEMBASE;
13340 return OP_LOADR4_MEMBASE;
13342 return OP_LOADR8_MEMBASE;
13344 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL CEE_STIND_* indirect-store opcode into the JIT's
 * typed OP_STORE*_MEMBASE_REG opcode (most CEE_* case labels are elided
 * in this view). Unknown opcodes assert.
 */
13351 stind_to_store_membase (int opcode)
13355 return OP_STOREI1_MEMBASE_REG;
13357 return OP_STOREI2_MEMBASE_REG;
13359 return OP_STOREI4_MEMBASE_REG;
/* object references use the pointer-sized store */
13361 case CEE_STIND_REF:
13362 return OP_STORE_MEMBASE_REG;
13364 return OP_STOREI8_MEMBASE_REG;
13366 return OP_STORER4_MEMBASE_REG;
13368 return OP_STORER8_MEMBASE_REG;
13370 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * form (OP_LOAD*_MEM). Only x86/amd64 have these opcodes, hence the
 * target ifdef; the 64 bit load additionally requires 64 bit registers.
 * NOTE(review): the fallthrough return for non-matching opcodes/targets
 * is elided in this view.
 */
13377 mono_load_membase_to_load_mem (int opcode)
13379 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13380 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13382 case OP_LOAD_MEMBASE:
13383 return OP_LOAD_MEM;
13384 case OP_LOADU1_MEMBASE:
13385 return OP_LOADU1_MEM;
13386 case OP_LOADU2_MEMBASE:
13387 return OP_LOADU2_MEM;
13388 case OP_LOADI4_MEMBASE:
13389 return OP_LOADI4_MEM;
13390 case OP_LOADU4_MEMBASE:
13391 return OP_LOADU4_MEM;
13392 #if SIZEOF_REGISTER == 8
13393 case OP_LOADI8_MEMBASE:
13394 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU operation whose result is immediately stored via
 * STORE_OPCODE into a single x86/amd64 read-modify-write memory opcode
 * (OP_X86_*_MEMBASE_REG / _IMM, OP_AMD64_* for 64 bit operands).
 * Returns the fused opcode; only specific store widths qualify (see the
 * guards below). The ALU case labels are elided in this view; the
 * mapping (add/sub/and/or/xor, reg and imm forms) is evident from the
 * returned opcodes.
 */
13403 op_to_op_dest_membase (int store_opcode, int opcode)
13405 #if defined(TARGET_X86)
/* on x86 only pointer-sized and 32 bit stores can be fused */
13406 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13411 return OP_X86_ADD_MEMBASE_REG;
13413 return OP_X86_SUB_MEMBASE_REG;
13415 return OP_X86_AND_MEMBASE_REG;
13417 return OP_X86_OR_MEMBASE_REG;
13419 return OP_X86_XOR_MEMBASE_REG;
13422 return OP_X86_ADD_MEMBASE_IMM;
13425 return OP_X86_SUB_MEMBASE_IMM;
13428 return OP_X86_AND_MEMBASE_IMM;
13431 return OP_X86_OR_MEMBASE_IMM;
13434 return OP_X86_XOR_MEMBASE_IMM;
13440 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64 bit stores */
13441 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13446 return OP_X86_ADD_MEMBASE_REG;
13448 return OP_X86_SUB_MEMBASE_REG;
13450 return OP_X86_AND_MEMBASE_REG;
13452 return OP_X86_OR_MEMBASE_REG;
13454 return OP_X86_XOR_MEMBASE_REG;
13456 return OP_X86_ADD_MEMBASE_IMM;
13458 return OP_X86_SUB_MEMBASE_IMM;
13460 return OP_X86_AND_MEMBASE_IMM;
13462 return OP_X86_OR_MEMBASE_IMM;
13464 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit (long) operand variants */
13466 return OP_AMD64_ADD_MEMBASE_REG;
13468 return OP_AMD64_SUB_MEMBASE_REG;
13470 return OP_AMD64_AND_MEMBASE_REG;
13472 return OP_AMD64_OR_MEMBASE_REG;
13474 return OP_AMD64_XOR_MEMBASE_REG;
13477 return OP_AMD64_ADD_MEMBASE_IMM;
13480 return OP_AMD64_SUB_MEMBASE_IMM;
13483 return OP_AMD64_AND_MEMBASE_IMM;
13486 return OP_AMD64_OR_MEMBASE_IMM;
13489 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a 1 byte store into a
 * direct setcc-to-memory opcode on x86/amd64 (the matched condition
 * opcodes are elided in this view; the SETEQ/SETNE returns show the
 * eq/ne pair being handled).
 */
13499 op_to_op_store_membase (int store_opcode, int opcode)
13501 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13504 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13505 return OP_X86_SETEQ_MEMBASE;
13507 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13508 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST source operand of
 * OPCODE into a single x86/amd64 opcode that reads the operand directly
 * from memory (push/compare *_MEMBASE forms). On amd64 the choice between
 * 32 bit (ICOMPARE) and 64 bit (COMPARE) memory forms depends on the load
 * width and on ilp32 (x32 ABI) mode.
 */
13516 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13519 /* FIXME: This has sign extension issues */
13521 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13522 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-sized / 32 bit loads can be folded on x86 */
13525 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13530 return OP_X86_PUSH_MEMBASE;
13531 case OP_COMPARE_IMM:
13532 case OP_ICOMPARE_IMM:
13533 return OP_X86_COMPARE_MEMBASE_IMM;
13536 return OP_X86_COMPARE_MEMBASE_REG;
13540 #ifdef TARGET_AMD64
13541 /* FIXME: This has sign extension issues */
13543 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13544 return OP_X86_COMPARE_MEMBASE8_IMM;
13549 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13550 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare folding below is intentionally disabled (commented out upstream) */
13552 /* FIXME: This only works for 32 bit immediates
13553 case OP_COMPARE_IMM:
13554 case OP_LCOMPARE_IMM:
13555 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13556 return OP_AMD64_COMPARE_MEMBASE_IMM;
13558 case OP_ICOMPARE_IMM:
13559 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13560 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* under ilp32, pointer-sized loads are 32 bit -> use the ICOMPARE form */
13564 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13565 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13566 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13567 return OP_AMD64_COMPARE_MEMBASE_REG;
13570 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13571 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the SECOND source operand of
 * OPCODE into a single x86/amd64 reg-op-memory opcode
 * (OP_X86_*_REG_MEMBASE / OP_AMD64_*_REG_MEMBASE). On amd64 the 32 bit
 * forms are used for 32 bit loads (and pointer loads under ilp32), the
 * 64 bit AMD64_* forms for 64 bit loads. The ALU case labels are elided
 * in this view.
 */
13580 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13583 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13589 return OP_X86_COMPARE_REG_MEMBASE;
13591 return OP_X86_ADD_REG_MEMBASE;
13593 return OP_X86_SUB_REG_MEMBASE;
13595 return OP_X86_AND_REG_MEMBASE;
13597 return OP_X86_OR_REG_MEMBASE;
13599 return OP_X86_XOR_REG_MEMBASE;
13603 #ifdef TARGET_AMD64
/* 32 bit operand widths (including pointer loads under ilp32) */
13604 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13607 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13609 return OP_X86_ADD_REG_MEMBASE;
13611 return OP_X86_SUB_REG_MEMBASE;
13613 return OP_X86_AND_REG_MEMBASE;
13615 return OP_X86_OR_REG_MEMBASE;
13617 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand widths */
13619 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13623 return OP_AMD64_COMPARE_REG_MEMBASE;
13625 return OP_AMD64_ADD_REG_MEMBASE;
13627 return OP_AMD64_SUB_REG_MEMBASE;
13629 return OP_AMD64_AND_REG_MEMBASE;
13631 return OP_AMD64_OR_REG_MEMBASE;
13633 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that are emulated in software on the current architecture (long shifts
 * on 32 bit targets, mul/div/rem under MONO_ARCH_EMULATE_*), since their
 * immediate forms would not exist natively. The refused case labels are
 * elided in this view; everything else falls through to mono_op_to_op_imm.
 */
13642 mono_op_to_op_imm_noemul (int opcode)
13645 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13651 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13658 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13663 return mono_op_to_op_imm (opcode);
13668  * mono_handle_global_vregs:
13670  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13674 mono_handle_global_vregs (MonoCompile *cfg)
13676 gint32 *vreg_to_bb;
13677 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] encodes where the vreg has been seen:
 *   0  = not seen yet, block_num+1 = seen only in that block, -1 = global.
 * NOTE(review): 'sizeof (gint32*) * cfg->next_vreg + 1' looks like a typo
 * for 'sizeof (gint32) * (cfg->next_vreg + 1)'; as written it allocates
 * pointer-sized slots plus one byte, which over-allocates on 64 bit but
 * is functionally harmless — confirm against upstream before changing.
 */
13680 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13682 #ifdef MONO_ARCH_SIMD_INTRINSICS
13683 if (cfg->uses_simd_intrinsics)
13684 mono_simd_simplify_indirection (cfg);
13687 /* Find local vregs used in more than one bb */
13688 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13689 MonoInst *ins = bb->code;
13690 int block_num = bb->block_num;
13692 if (cfg->verbose_level > 2)
13693 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13696 for (; ins; ins = ins->next) {
13697 const char *spec = INS_INFO (ins->opcode);
13698 int regtype = 0, regindex;
13701 if (G_UNLIKELY (cfg->verbose_level > 2))
13702 mono_print_ins (ins);
13704 g_assert (ins->opcode >= MONO_CEE_LAST);
/* iterate over dreg, sreg1, sreg2, sreg3 of the instruction */
13706 for (regindex = 0; regindex < 4; regindex ++) {
13709 if (regindex == 0) {
13710 regtype = spec [MONO_INST_DEST];
13711 if (regtype == ' ')
13714 } else if (regindex == 1) {
13715 regtype = spec [MONO_INST_SRC1];
13716 if (regtype == ' ')
13719 } else if (regindex == 2) {
13720 regtype = spec [MONO_INST_SRC2];
13721 if (regtype == ' ')
13724 } else if (regindex == 3) {
13725 regtype = spec [MONO_INST_SRC3];
13726 if (regtype == ' ')
13731 #if SIZEOF_REGISTER == 4
13732 /* In the LLVM case, the long opcodes are not decomposed */
13733 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13735  * Since some instructions reference the original long vreg,
13736  * and some reference the two component vregs, it is quite hard
13737  * to determine when it needs to be global. So be conservative.
13739 if (!get_vreg_to_inst (cfg, vreg)) {
13740 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13742 if (cfg->verbose_level > 2)
13743 printf ("LONG VREG R%d made global.\n", vreg);
13747  * Make the component vregs volatile since the optimizations can
13748  * get confused otherwise.
13750 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13751 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13755 g_assert (vreg != -1);
13757 prev_bb = vreg_to_bb [vreg];
13758 if (prev_bb == 0) {
13759 /* 0 is a valid block num */
13760 vreg_to_bb [vreg] = block_num + 1;
13761 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never made global */
13762 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13765 if (!get_vreg_to_inst (cfg, vreg)) {
13766 if (G_UNLIKELY (cfg->verbose_level > 2))
13767 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable type from the reg type of the spec */
13771 if (vreg_is_ref (cfg, vreg))
13772 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13774 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13777 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13780 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13783 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13786 g_assert_not_reached ();
13790 /* Flag as having been used in more than one bb */
13791 vreg_to_bb [vreg] = -1;
13797 /* If a variable is used in only one bblock, convert it into a local vreg */
13798 for (i = 0; i < cfg->num_varinfo; i++) {
13799 MonoInst *var = cfg->varinfo [i];
13800 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13802 switch (var->type) {
13808 #if SIZEOF_REGISTER == 8
13811 #if !defined(TARGET_X86)
13812 /* Enabling this screws up the fp stack on x86 */
13815 if (mono_arch_is_soft_float ())
13818 /* Arguments are implicitly global */
13819 /* Putting R4 vars into registers doesn't work currently */
13820 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13821 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13823  * Make that the variable's liveness interval doesn't contain a call, since
13824  * that would cause the lvreg to be spilled, making the whole optimization
13827 /* This is too slow for JIT compilation */
13829 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13831 int def_index, call_index, ins_index;
13832 gboolean spilled = FALSE;
13837 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13838 const char *spec = INS_INFO (ins->opcode);
13840 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13841 def_index = ins_index;
/*
 * FIX: the second clause used to repeat the SRC1/sreg1 test verbatim
 * (copy-paste), making it dead and missing uses through sreg2; it must
 * check MONO_INST_SRC2/sreg2 so second-operand uses are detected.
 */
13843 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13844 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
13845 if (call_index > def_index) {
13851 if (MONO_IS_CALL (ins))
13852 call_index = ins_index;
13862 if (G_UNLIKELY (cfg->verbose_level > 2))
13863 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13864 var->flags |= MONO_INST_IS_DEAD;
13865 cfg->vreg_to_inst [var->dreg] = NULL;
13872  * Compress the varinfo and vars tables so the liveness computation is faster and
13873  * takes up less space.
13876 for (i = 0; i < cfg->num_varinfo; ++i) {
13877 MonoInst *var = cfg->varinfo [i];
13878 if (pos < i && cfg->locals_start == i)
13879 cfg->locals_start = pos;
13880 if (!(var->flags & MONO_INST_IS_DEAD)) {
13882 cfg->varinfo [pos] = cfg->varinfo [i];
13883 cfg->varinfo [pos]->inst_c0 = pos;
13884 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13885 cfg->vars [pos].idx = pos;
13886 #if SIZEOF_REGISTER == 4
13887 if (cfg->varinfo [pos]->type == STACK_I8) {
13888 /* Modify the two component vars too */
13891 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13892 var1->inst_c0 = pos;
13893 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13894 var1->inst_c0 = pos;
13901 cfg->num_varinfo = pos;
13902 if (cfg->locals_start > cfg->num_varinfo)
13903 cfg->locals_start = cfg->num_varinfo;
13907 * mono_spill_global_vars:
13909 * Generate spill code for variables which are not allocated to registers,
13910 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13911 * code is generated which could be optimized by the local optimization passes.
13914 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13916 MonoBasicBlock *bb;
13918 int orig_next_vreg;
13919 guint32 *vreg_to_lvreg;
13921 guint32 i, lvregs_len;
13922 gboolean dest_has_lvreg = FALSE;
13923 guint32 stacktypes [128];
13924 MonoInst **live_range_start, **live_range_end;
13925 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13926 int *gsharedvt_vreg_to_idx = NULL;
13928 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec buffer; its declaration falls in a gap of
 * this excerpt — presumably file- or function-scope char array. */
13930 memset (spec2, 0, sizeof (spec2));
13932 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters ('i'/'l'/'f'/'x') to stack types,
 * used later when allocating fresh lvregs. */
13933 stacktypes ['i'] = STACK_PTR;
13934 stacktypes ['l'] = STACK_I8;
13935 stacktypes ['f'] = STACK_R8;
13936 #ifdef MONO_ARCH_SIMD_INTRINSICS
13937 stacktypes ['x'] = STACK_VTYPE;
13940 #if SIZEOF_REGISTER == 4
13941 /* Create MonoInsts for longs */
/* 32-bit only: each stack I8 variable gets two REGOFFSET component
 * MonoInsts at dreg+1 (LS word) and dreg+2 (MS word). */
13942 for (i = 0; i < cfg->num_varinfo; i++) {
13943 MonoInst *ins = cfg->varinfo [i];
13945 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13946 switch (ins->type) {
13951 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13954 g_assert (ins->opcode == OP_REGOFFSET);
13956 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13958 tree->opcode = OP_REGOFFSET;
13959 tree->inst_basereg = ins->inst_basereg;
13960 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13962 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13964 tree->opcode = OP_REGOFFSET;
13965 tree->inst_basereg = ins->inst_basereg;
13966 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13976 if (cfg->compute_gc_maps) {
13977 /* registers need liveness info even for !non refs */
13978 for (i = 0; i < cfg->num_varinfo; i++) {
13979 MonoInst *ins = cfg->varinfo [i];
13981 if (ins->opcode == OP_REGVAR)
13982 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: variable-sized locals get an info-table slot index
 * (stored as idx+1; 0 means "not gsharedvt"), variable-sized
 * arguments are marked with -1 and handled as pass-by-ref. */
13986 if (cfg->gsharedvt) {
13987 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13989 for (i = 0; i < cfg->num_varinfo; ++i) {
13990 MonoInst *ins = cfg->varinfo [i];
13993 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13994 if (i >= cfg->locals_start) {
13996 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13997 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13998 ins->opcode = OP_GSHAREDVT_LOCAL;
13999 ins->inst_imm = idx;
14002 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14003 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14009 /* FIXME: widening and truncation */
14012 * As an optimization, when a variable allocated to the stack is first loaded into
14013 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14014 * the variable again.
/* vreg_to_lvreg caches variable->lvreg mappings; lvregs lists the
 * cached entries so they can be cleared cheaply (capacity 1024,
 * asserted at each append).
 * NOTE(review): lvregs_len is not visibly initialized in this excerpt —
 * presumably set to 0 in one of the gaps; verify against the full file. */
14016 orig_next_vreg = cfg->next_vreg;
14017 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14018 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14022 * These arrays contain the first and last instructions accessing a given
14024 * Since we emit bblocks in the same order we process them here, and we
14025 * don't split live ranges, these will precisely describe the live range of
14026 * the variable, i.e. the instruction range where a valid value can be found
14027 * in the variables location.
14028 * The live range is computed using the liveness info computed by the liveness pass.
14029 * We can't use vmv->range, since that is an abstract live range, and we need
14030 * one which is instruction precise.
14031 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14033 /* FIXME: Only do this if debugging info is requested */
14034 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14035 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14036 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14037 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14039 /* Add spill loads/stores */
/* Main pass: walk every bblock and every instruction, rewriting global
 * vreg accesses into loads/stores (or fused _membase forms). */
14040 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14043 if (cfg->verbose_level > 2)
14044 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14046 /* Clear vreg_to_lvreg array */
/* lvreg caching is per-bblock: reset cached entries at block entry. */
14047 for (i = 0; i < lvregs_len; i++)
14048 vreg_to_lvreg [lvregs [i]] = 0;
14052 MONO_BB_FOR_EACH_INS (bb, ins) {
14053 const char *spec = INS_INFO (ins->opcode);
14054 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14055 gboolean store, no_lvreg;
14056 int sregs [MONO_MAX_SRC_REGS];
14058 if (G_UNLIKELY (cfg->verbose_level > 2))
14059 mono_print_ins (ins);
14061 if (ins->opcode == OP_NOP)
14065 * We handle LDADDR here as well, since it can only be decomposed
14066 * when variable addresses are known.
/* Lower OP_LDADDR now that stack offsets are assigned. */
14068 if (ins->opcode == OP_LDADDR) {
14069 MonoInst *var = ins->inst_p0;
14071 if (var->opcode == OP_VTARG_ADDR) {
14072 /* Happens on SPARC/S390 where vtypes are passed by reference */
14073 MonoInst *vtaddr = var->inst_left;
14074 if (vtaddr->opcode == OP_REGVAR) {
14075 ins->opcode = OP_MOVE;
14076 ins->sreg1 = vtaddr->dreg;
14078 else if (var->inst_left->opcode == OP_REGOFFSET) {
14079 ins->opcode = OP_LOAD_MEMBASE;
14080 ins->inst_basereg = vtaddr->inst_basereg;
14081 ins->inst_offset = vtaddr->inst_offset;
14084 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14085 /* gsharedvt arg passed by ref */
14086 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14088 ins->opcode = OP_LOAD_MEMBASE;
14089 ins->inst_basereg = var->inst_basereg;
14090 ins->inst_offset = var->inst_offset;
14091 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14092 MonoInst *load, *load2, *load3;
14093 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14094 int reg1, reg2, reg3;
14095 MonoInst *info_var = cfg->gsharedvt_info_var;
14096 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14100 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14103 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14105 g_assert (info_var);
14106 g_assert (locals_var);
14108 /* Mark the instruction used to compute the locals var as used */
14109 cfg->gsharedvt_locals_var_ins = NULL;
14111 /* Load the offset */
14112 if (info_var->opcode == OP_REGOFFSET) {
14113 reg1 = alloc_ireg (cfg);
14114 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14115 } else if (info_var->opcode == OP_REGVAR) {
14117 reg1 = info_var->dreg;
14119 g_assert_not_reached ();
14121 reg2 = alloc_ireg (cfg);
14122 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14123 /* Load the locals area address */
14124 reg3 = alloc_ireg (cfg);
14125 if (locals_var->opcode == OP_REGOFFSET) {
14126 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14127 } else if (locals_var->opcode == OP_REGVAR) {
14128 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14130 g_assert_not_reached ();
14132 /* Compute the address */
14133 ins->opcode = OP_PADD;
/* Insert the three helper loads before the (now PADD) instruction,
 * in reverse so they execute in load, load2, load3 order. */
14137 mono_bblock_insert_before_ins (bb, ins, load3);
14138 mono_bblock_insert_before_ins (bb, load3, load2);
14140 mono_bblock_insert_before_ins (bb, load2, load);
14142 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset. */
14144 ins->opcode = OP_ADD_IMM;
14145 ins->sreg1 = var->inst_basereg;
14146 ins->inst_imm = var->inst_offset;
14149 *need_local_opts = TRUE;
14150 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must all have been lowered by now. */
14153 if (ins->opcode < MONO_CEE_LAST) {
14154 mono_print_ins (ins);
14155 g_assert_not_reached ();
14159 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Trick: temporarily swap dreg <-> sreg2 of store opcodes and build a
 * matching spec2, so the base register is processed as a source below.
 * The swap is undone after the sources are handled. */
14163 if (MONO_IS_STORE_MEMBASE (ins)) {
14164 tmp_reg = ins->dreg;
14165 ins->dreg = ins->sreg2;
14166 ins->sreg2 = tmp_reg;
14169 spec2 [MONO_INST_DEST] = ' ';
14170 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14171 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14172 spec2 [MONO_INST_SRC3] = ' ';
14174 } else if (MONO_IS_STORE_MEMINDEX (ins))
14175 g_assert_not_reached ();
14180 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14181 printf ("\t %.3s %d", spec, ins->dreg);
14182 num_sregs = mono_inst_get_src_registers (ins, sregs);
14183 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14184 printf (" %d", sregs [srcindex]);
/***************/
/* DREG        */
/***************/
14191 regtype = spec [MONO_INST_DEST];
14192 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14195 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14196 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14197 MonoInst *store_ins;
14199 MonoInst *def_ins = ins;
14200 int dreg = ins->dreg; /* The original vreg */
14202 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14204 if (var->opcode == OP_REGVAR) {
/* Allocated to a hardware register: just substitute it. */
14205 ins->dreg = var->dreg;
14206 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14208 * Instead of emitting a load+store, use a _membase opcode.
14210 g_assert (var->opcode == OP_REGOFFSET);
14211 if (ins->opcode == OP_MOVE) {
14215 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14216 ins->inst_basereg = var->inst_basereg;
14217 ins->inst_offset = var->inst_offset;
14220 spec = INS_INFO (ins->opcode);
/* General case: redirect the def into a fresh lvreg and emit a
 * store back to the variable's stack slot. */
14224 g_assert (var->opcode == OP_REGOFFSET);
14226 prev_dreg = ins->dreg;
14228 /* Invalidate any previous lvreg for this vreg */
14229 vreg_to_lvreg [ins->dreg] = 0;
14233 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* soft-float keeps R8 values in integer registers */
14235 store_opcode = OP_STOREI8_MEMBASE_REG;
14238 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14240 #if SIZEOF_REGISTER != 8
14241 if (regtype == 'l') {
/* 32-bit long def: store the two component words separately. */
14242 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14243 mono_bblock_insert_after_ins (bb, ins, store_ins);
14244 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14245 mono_bblock_insert_after_ins (bb, ins, store_ins);
14246 def_ins = store_ins;
14251 g_assert (store_opcode != OP_STOREV_MEMBASE);
14253 /* Try to fuse the store into the instruction itself */
14254 /* FIXME: Add more instructions */
14255 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14256 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14257 ins->inst_imm = ins->inst_c0;
14258 ins->inst_destbasereg = var->inst_basereg;
14259 ins->inst_offset = var->inst_offset;
14260 spec = INS_INFO (ins->opcode);
14261 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* Turn the move into a direct store of its source. */
14262 ins->opcode = store_opcode;
14263 ins->inst_destbasereg = var->inst_basereg;
14264 ins->inst_offset = var->inst_offset;
/* The instruction became a store: apply the same dreg/sreg2
 * swap + spec2 treatment as for pre-existing stores above. */
14268 tmp_reg = ins->dreg;
14269 ins->dreg = ins->sreg2;
14270 ins->sreg2 = tmp_reg;
14273 spec2 [MONO_INST_DEST] = ' ';
14274 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14275 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14276 spec2 [MONO_INST_SRC3] = ' ';
14278 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14279 // FIXME: The backends expect the base reg to be in inst_basereg
14280 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14282 ins->inst_basereg = var->inst_basereg;
14283 ins->inst_offset = var->inst_offset;
14284 spec = INS_INFO (ins->opcode);
14286 /* printf ("INS: "); mono_print_ins (ins); */
14287 /* Create a store instruction */
14288 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14290 /* Insert it after the instruction */
14291 mono_bblock_insert_after_ins (bb, ins, store_ins);
14293 def_ins = store_ins;
14296 * We can't assign ins->dreg to var->dreg here, since the
14297 * sregs could use it. So set a flag, and do it after
14300 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14301 dest_has_lvreg = TRUE;
/* Record the start of the variable's precise live range. */
14306 if (def_ins && !live_range_start [dreg]) {
14307 live_range_start [dreg] = def_ins;
14308 live_range_start_bb [dreg] = bb;
14311 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14314 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14315 tmp->inst_c1 = dreg;
14316 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/* SREGS       */
/***************/
14323 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): loops over all 3 source slots rather than num_sregs;
 * unused slots are presumably -1 with regtype ' ' (the assert below
 * relies on that) — confirm against mono_inst_get_src_registers. */
14324 for (srcindex = 0; srcindex < 3; ++srcindex) {
14325 regtype = spec [MONO_INST_SRC1 + srcindex];
14326 sreg = sregs [srcindex];
14328 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14329 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14330 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14331 MonoInst *use_ins = ins;
14332 MonoInst *load_ins;
14333 guint32 load_opcode;
14335 if (var->opcode == OP_REGVAR) {
/* Allocated to a hardware register: substitute and record use. */
14336 sregs [srcindex] = var->dreg;
14337 //mono_inst_set_src_registers (ins, sregs);
14338 live_range_end [sreg] = use_ins;
14339 live_range_end_bb [sreg] = bb;
14341 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14344 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14345 /* var->dreg is a hreg */
14346 tmp->inst_c1 = sreg;
14347 mono_bblock_insert_after_ins (bb, ins, tmp);
14353 g_assert (var->opcode == OP_REGOFFSET);
14355 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14357 g_assert (load_opcode != OP_LOADV_MEMBASE);
14359 if (vreg_to_lvreg [sreg]) {
14360 g_assert (vreg_to_lvreg [sreg] != -1);
14362 /* The variable is already loaded to an lvreg */
14363 if (G_UNLIKELY (cfg->verbose_level > 2))
14364 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14365 sregs [srcindex] = vreg_to_lvreg [sreg];
14366 //mono_inst_set_src_registers (ins, sregs);
14370 /* Try to fuse the load into the instruction */
14371 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14372 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14373 sregs [0] = var->inst_basereg;
14374 //mono_inst_set_src_registers (ins, sregs);
14375 ins->inst_offset = var->inst_offset;
14376 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14377 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14378 sregs [1] = var->inst_basereg;
14379 //mono_inst_set_src_registers (ins, sregs);
14380 ins->inst_offset = var->inst_offset;
14382 if (MONO_IS_REAL_MOVE (ins)) {
/* A move out of a stack var becomes just the load emitted below. */
14383 ins->opcode = OP_NOP;
14386 //printf ("%d ", srcindex); mono_print_ins (ins);
14388 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded lvreg unless the variable is
 * volatile/indirect or lives on the fp stack. */
14390 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14391 if (var->dreg == prev_dreg) {
14393 * sreg refers to the value loaded by the load
14394 * emitted below, but we need to use ins->dreg
14395 * since it refers to the store emitted earlier.
14399 g_assert (sreg != -1);
14400 vreg_to_lvreg [var->dreg] = sreg;
14401 g_assert (lvregs_len < 1024);
14402 lvregs [lvregs_len ++] = var->dreg;
14406 sregs [srcindex] = sreg;
14407 //mono_inst_set_src_registers (ins, sregs);
14409 #if SIZEOF_REGISTER != 8
14410 if (regtype == 'l') {
/* 32-bit long use: load the two component words separately. */
14411 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14412 mono_bblock_insert_before_ins (bb, ins, load_ins);
14413 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14414 mono_bblock_insert_before_ins (bb, ins, load_ins);
14415 use_ins = load_ins;
14420 #if SIZEOF_REGISTER == 4
14421 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14423 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14424 mono_bblock_insert_before_ins (bb, ins, load_ins);
14425 use_ins = load_ins;
/* Extend the variable's precise live range to this use. */
14429 if (var->dreg < orig_next_vreg) {
14430 live_range_end [var->dreg] = use_ins;
14431 live_range_end_bb [var->dreg] = bb;
14434 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14437 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14438 tmp->inst_c1 = var->dreg;
14439 mono_bblock_insert_after_ins (bb, ins, tmp);
14443 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now that the sregs are processed it
 * is safe to publish the def's lvreg in the cache. */
14445 if (dest_has_lvreg) {
14446 g_assert (ins->dreg != -1);
14447 vreg_to_lvreg [prev_dreg] = ins->dreg;
14448 g_assert (lvregs_len < 1024);
14449 lvregs [lvregs_len ++] = prev_dreg;
14450 dest_has_lvreg = FALSE;
/* Undo the store-opcode dreg/sreg2 swap performed earlier. */
14454 tmp_reg = ins->dreg;
14455 ins->dreg = ins->sreg2;
14456 ins->sreg2 = tmp_reg;
14459 if (MONO_IS_CALL (ins)) {
14460 /* Clear vreg_to_lvreg array */
/* Calls clobber lvregs, so the cache is invalid past this point. */
14461 for (i = 0; i < lvregs_len; i++)
14462 vreg_to_lvreg [lvregs [i]] = 0;
14464 } else if (ins->opcode == OP_NOP) {
14466 MONO_INST_NULLIFY_SREGS (ins);
14469 if (cfg->verbose_level > 2)
14470 mono_print_ins_index (1, ins);
14473 /* Extend the live range based on the liveness info */
14474 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14475 for (i = 0; i < cfg->num_varinfo; i ++) {
14476 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14478 if (vreg_is_volatile (cfg, vi->vreg))
14479 /* The liveness info is incomplete */
14482 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14483 /* Live from at least the first ins of this bb */
14484 live_range_start [vi->vreg] = bb->code;
14485 live_range_start_bb [vi->vreg] = bb;
14488 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14489 /* Live at least until the last ins of this bb */
14490 live_range_end [vi->vreg] = bb->last_ins;
14491 live_range_end_bb [vi->vreg] = bb;
14498 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14499 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14501 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14502 for (i = 0; i < cfg->num_varinfo; ++i) {
14503 int vreg = MONO_VARINFO (cfg, i)->vreg;
14506 if (live_range_start [vreg]) {
14507 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14509 ins->inst_c1 = vreg;
14510 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14512 if (live_range_end [vreg]) {
14513 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14515 ins->inst_c1 = vreg;
14516 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14517 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14519 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14524 if (cfg->gsharedvt_locals_var_ins) {
14525 /* Nullify if unused */
14526 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14527 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* Free the heap-allocated live-range bookkeeping (g_new0/g_new above);
 * the mempool allocations are owned by cfg and freed with it. */
14530 g_free (live_range_start);
14531 g_free (live_range_end);
14532 g_free (live_range_start_bb);
14533 g_free (live_range_end_bb);
14538 * - use 'iadd' instead of 'int_add'
14539 * - handling ovf opcodes: decompose in method_to_ir.
14540 * - unify iregs/fregs
14541 * -> partly done, the missing parts are:
14542 * - a more complete unification would involve unifying the hregs as well, so
14543 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14544 * would no longer map to the machine hregs, so the code generators would need to
14545 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14546 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14547 * fp/non-fp branches speeds it up by about 15%.
14548 * - use sext/zext opcodes instead of shifts
14550 * - get rid of TEMPLOADs if possible and use vregs instead
14551 * - clean up usage of OP_P/OP_ opcodes
14552 * - cleanup usage of DUMMY_USE
14553 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14555 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14556 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14557 * - make sure handle_stack_args () is called before the branch is emitted
14558 * - when the new IR is done, get rid of all unused stuff
14559 * - COMPARE/BEQ as separate instructions or unify them ?
14560 * - keeping them separate allows specialized compare instructions like
14561 * compare_imm, compare_membase
14562 * - most back ends unify fp compare+branch, fp compare+ceq
14563 * - integrate mono_save_args into inline_method
14564 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14565 * - handle long shift opts on 32 bit platforms somehow: they require
14566 * 3 sregs (2 for arg1 and 1 for arg2)
14567 * - make byref a 'normal' type.
14568 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14569 * variable if needed.
14570 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14571 * like inline_method.
14572 * - remove inlining restrictions
14573 * - fix LNEG and enable cfold of INEG
14574 * - generalize x86 optimizations like ldelema as a peephole optimization
14575 * - add store_mem_imm for amd64
14576 * - optimize the loading of the interruption flag in the managed->native wrappers
14577 * - avoid special handling of OP_NOP in passes
14578 * - move code inserting instructions into one function/macro.
14579 * - try a coalescing phase after liveness analysis
14580 * - add float -> vreg conversion + local optimizations on !x86
14581 * - figure out how to handle decomposed branches during optimizations, ie.
14582 * compare+branch, op_jump_table+op_br etc.
14583 * - promote RuntimeXHandles to vregs
14584 * - vtype cleanups:
14585 * - add a NEW_VARLOADA_VREG macro
14586 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14587 * accessing vtype fields.
14588 * - get rid of I8CONST on 64 bit platforms
14589 * - dealing with the increase in code size due to branches created during opcode
14591 * - use extended basic blocks
14592 * - all parts of the JIT
14593 * - handle_global_vregs () && local regalloc
14594 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14595 * - sources of increase in code size:
14598 * - isinst and castclass
14599 * - lvregs not allocated to global registers even if used multiple times
14600 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14602 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14603 * - add all micro optimizations from the old JIT
14604 * - put tree optimizations into the deadce pass
14605 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14606 * specific function.
14607 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14608 * fcompare + branchCC.
14609 * - create a helper function for allocating a stack slot, taking into account
14610 * MONO_CFG_HAS_SPILLUP.
14612 * - merge the ia64 switch changes.
14613 * - optimize mono_regstate2_alloc_int/float.
14614 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14615 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14616 * parts of the tree could be separated by other instructions, killing the tree
14617 * arguments, or stores killing loads etc. Also, should we fold loads into other
14618 * instructions if the result of the load is used multiple times ?
14619 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14620 * - LAST MERGE: 108395.
14621 * - when returning vtypes in registers, generate IR and append it to the end of the
14622 * last bb instead of doing it in the epilog.
14623 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14631 - When to decompose opcodes:
14632 - earlier: this makes some optimizations hard to implement, since the low level IR
14633 no longer contains the necessary information. But it is easier to do.
14634 - later: harder to implement, enables more optimizations.
14635 - Branches inside bblocks:
14636 - created when decomposing complex opcodes.
14637 - branches to another bblock: harmless, but not tracked by the branch
14638 optimizations, so need to branch to a label at the start of the bblock.
14639 - branches to inside the same bblock: very problematic, trips up the local
14640 reg allocator. Can be fixed by splitting the current bblock, but that is a
14641 complex operation, since some local vregs can become global vregs etc.
14642 - Local/global vregs:
14643 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14644 local register allocator.
14645 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14646 structure, created by mono_create_var (). Assigned to hregs or the stack by
14647 the global register allocator.
14648 - When to do optimizations like alu->alu_imm:
14649 - earlier -> saves work later on since the IR will be smaller/simpler
14650 - later -> can work on more instructions
14651 - Handling of valuetypes:
14652 - When a vtype is pushed on the stack, a new temporary is created, an
14653 instruction computing its address (LDADDR) is emitted and pushed on
14654 the stack. Need to optimize cases when the vtype is used immediately as in
14655 argument passing, stloc etc.
14656 - Instead of the to_end stuff in the old JIT, simply call the function handling
14657 the values on the stack before emitting the last instruction of the bb.
14660 #endif /* DISABLE_JIT */