2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/*
 * Inlining heuristics: BRANCH_COST weighs branches when estimating the
 * cost of a callee; INLINE_LENGTH_LIMIT is the size budget for inlinees.
 */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* These have 'cfg' as an implicit argument */
/* NOTE(review): the macro bodies below are missing their closers in this view of the source. */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
#define CHECK_CFG_EXCEPTION do {\
	if (cfg->exception_type != MONO_EXCEPTION_NONE) \
		goto exception_exit; \
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
	method_access_failure ((cfg), (method), (cmethod)); \
	goto exception_exit; \
#define FIELD_ACCESS_FAILURE(method, field) do { \
	field_access_failure ((cfg), (method), (field)); \
	goto exception_exit; \
#define GENERIC_SHARING_FAILURE(opcode) do { \
	gshared_failure (cfg, opcode, __FILE__, __LINE__); \
	goto exception_exit; \
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
	goto exception_exit; \
#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
#define LOAD_ERROR do { \
	break_on_unverified (); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
	goto exception_exit; \
#define TYPE_LOAD_ERROR(klass) do { \
	cfg->exception_ptr = klass; \
#define CHECK_CFG_ERROR do {\
	if (!mono_error_ok (&cfg->error)) { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		goto mono_error_exit; \
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Map ldind/stind CIL opcodes to the corresponding load/store membase opcodes (defined later). */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);

static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
			  guchar *ip, guint real_offset, gboolean inline_always);

/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;

/*
 * Instruction metadata
 */
/* First expansion of mini-ops.h: emit each opcode's dest/src1/src2 register specifiers. */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
/* Second expansion: compute how many source registers each opcode consumes. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 ins_sreg_counts[] = {
#include "mini-ops.h"
/* Initialize liveness-range bookkeeping for variable VI. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
/* Thin public wrappers over the vreg allocators (alloc_*reg in mini.h). */

/* Allocate a new integer vreg. */
mono_alloc_ireg (MonoCompile *cfg)
	return alloc_ireg (cfg);

/* Allocate a new long vreg. */
mono_alloc_lreg (MonoCompile *cfg)
	return alloc_lreg (cfg);

/* Allocate a new floating-point vreg. */
mono_alloc_freg (MonoCompile *cfg)
	return alloc_freg (cfg);

/* Allocate a new pointer-sized vreg. */
mono_alloc_preg (MonoCompile *cfg)
	return alloc_preg (cfg);

/* Allocate a vreg suitable for holding a value of the given eval-stack type. */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
	return alloc_dreg (cfg, stack_type);

/*
 * mono_alloc_ireg_ref:
 *
 *   Allocate an IREG, and mark it as holding a GC ref.
 */
mono_alloc_ireg_ref (MonoCompile *cfg)
	return alloc_ireg_ref (cfg);

/*
 * mono_alloc_ireg_mp:
 *
 *   Allocate an IREG, and mark it as holding a managed pointer.
 */
mono_alloc_ireg_mp (MonoCompile *cfg)
	return alloc_ireg_mp (cfg);

/*
 * mono_alloc_ireg_copy:
 *
 *   Allocate an IREG with the same GC type as VREG.
 */
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode used to copy a value of TYPE between registers.
 * NOTE(review): several case labels and return statements are elided in
 * this view of the source.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
	type = mini_get_underlying_type (type);

	switch (type->type) {
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		/* Enums move like their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		/* Type variables are only reached under generic sharing. */
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (type))
		return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
	g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug dump of basic block BB prefixed with MSG: its in/out edges
 * followed by every instruction in the block.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
/* Create the cached icall signatures used by this file. */
mono_create_helper_signatures (void)
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
/* Debugger convenience hook: trap here when --break-on-unverified is set. */
static MONO_NEVER_INLINE void
break_on_unverified (void)
	if (mini_get_debug_options ()->break_on_unverified)

/*
 * method_access_failure:
 *
 *   Mark CFG with a method-access error: CIL_METHOD is not accessible
 * from METHOD.  Formats the exception message from the full method names.
 */
static MONO_NEVER_INLINE void
method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
	char *method_fname = mono_method_full_name (method, TRUE);
	char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
	cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
	g_free (method_fname);
	g_free (cil_method_fname);

/*
 * field_access_failure:
 *
 *   Mark CFG with a field-access error: FIELD is not accessible from METHOD.
 */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
	cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);

/* Record that inlining failed (MSG says why) and set INLINE_FAILED on CFG. */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record that gsharedvt compilation cannot handle OPCODE (seen at
 * FILE:LINE) and mark CFG with GENERIC_SHARING_FAILED so the JIT falls
 * back to compiling a concrete instantiation.
 */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	break_on_unverified (); \
/* Look up the basic block starting at IL offset IP, creating and registering it on demand. */
#define GET_BBLOCK(cfg,tblock,ip) do { \
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
	if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
	NEW_BBLOCK (cfg, (tblock)); \
	(tblock)->cil_code = (ip); \
	ADD_BBLOCK (cfg, (tblock)); \

#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an OP_X86_LEA computing the x86 addressing form sr1 + (sr2 << shift) + imm. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
	MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
	(dest)->dreg = alloc_ireg_mp ((cfg)); \
	(dest)->sreg1 = (sr1); \
	(dest)->sreg2 = (sr2); \
	(dest)->inst_imm = (imm); \
	(dest)->backend.shift_amount = (shift); \
	MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;

	/* One operand R4, the other R8: widen the R4 side to R8. */
	((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
	 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
		/* Mixing r4/r8 is allowed by the spec */
		if (arg1->type == STACK_R4) {
			int dreg = alloc_freg (cfg);
			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
			conv->type = STACK_R8;
		if (arg2->type == STACK_R4) {
			int dreg = alloc_freg (cfg);
			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
			conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	/* Native int mixed with int32 on 64-bit: sign-extend the 32-bit operand. */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
/* Pop two stack values, emit the type-specialized binary op, push the result. */
#define ADD_BINOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	ins->sreg1 = sp [0]->dreg; \
	ins->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	/* Have to insert a widening op */ \
	add_widen_op (cfg, ins, &sp [0], &sp [1]); \
	ins->dreg = alloc_dreg ((cfg), (ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one stack value, emit the type-specialized unary op, push the result. */
#define ADD_UNOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	ins->sreg1 = sp [0]->dreg; \
	type_from_op (cfg, ins, sp [0], NULL); \
	(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional-branch pair and wire up the true/false CFG edges. */
#define ADD_BINCOND(next_block) do { \
	MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
	cmp->sreg1 = sp [0]->dreg; \
	cmp->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, cmp, sp [0], sp [1]); \
	add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
	GET_BBLOCK (cfg, tblock, target); \
	link_bblock (cfg, cfg->cbb, tblock); \
	ins->inst_true_bb = tblock; \
	if ((next_block)) { \
		link_bblock (cfg, cfg->cbb, (next_block)); \
		ins->inst_false_bb = (next_block); \
		start_new_bblock = 1; \
	GET_BBLOCK (cfg, tblock, ip); \
	link_bblock (cfg, cfg->cbb, tblock); \
	ins->inst_false_bb = tblock; \
	start_new_bblock = 2; \
	/* Spill any remaining eval-stack values at the block boundary. */ \
	if (sp != stack_start) { \
		handle_stack_args (cfg, stack_start, sp - stack_start); \
		CHECK_UNVERIFIABLE (cfg); \
	MONO_ADD_INS (cfg->cbb, cmp); \
	MONO_ADD_INS (cfg->cbb, ins); \
/*
 * link_bblock: Links two basic blocks
 *
 * links two basic blocks in the control flow graph, the 'from'
 * argument is the starting block and the 'to' argument is the block
 * the control flow ends to after 'from'.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	MonoBasicBlock **newa;

	/* Trace the edge being added (entry/exit blocks have no cil_code). */
	if (from->cil_code) {
		printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
		printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		printf ("edge from entry to exit\n");
	/* Nothing to do if the out-edge already exists. */
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
	/* Grow FROM's out-edge array by one, copying the existing entries. */
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
	for (i = 0; i < from->out_count; ++i) {
		newa [i] = from->out_bb [i];
	/* Same dance for TO's in-edge array. */
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
	for (i = 0; i < to->in_count; ++i) {
		newa [i] = to->in_bb [i];

/* Public wrapper around link_bblock (). */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 * We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 * about the clause owner for this block.
 *
 * The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;

	/* First pass: is OFFSET inside a filter or a handler body? */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
			return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
	/* Second pass: is OFFSET inside a protected (try) range? */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE whose protected range
 * contains IP but not TARGET, i.e. the clauses a branch from IP to
 * TARGET would exit.
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type)
				res = g_list_append (res, clause);
/* Get or lazily create the per-EH-region 'spvar' local (kept out of registers). */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));

	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);

/* Return the exception variable created for IL offset OFFSET, or NULL if none exists. */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));

/* Get or lazily create the object-typed exception variable for IL offset OFFSET. */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));

	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * type_to_eval_stack_type:
 *
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 * NOTE(review): several case labels/breaks are elided in this view.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
	type = mini_get_underlying_type (type);
	inst->klass = klass = mono_class_from_mono_type (type);
	/* byref values live on the eval stack as managed pointers (guard elided in this view). */
	inst->type = STACK_MP;

	switch (type->type) {
		inst->type = STACK_INV;
		inst->type = STACK_I4;
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		inst->type = STACK_I8;
		inst->type = cfg->r4_stack_type;
		inst->type = STACK_R8;
	case MONO_TYPE_VALUETYPE:
		/* Enums are treated as their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		inst->type = STACK_VTYPE;
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		/* Type variables only occur under generic sharing. */
		g_assert (cfg->gshared);
		if (mini_is_gsharedvt_type (type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
		type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
	g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
/* Result stack type of numeric binops, indexed [lhs stack type][rhs stack type]. */
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
	{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* neg_table contents: result type of negation per operand stack type (declaration elided in this view). */
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
/* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Which stack-type pairs may be compared (non-zero = allowed; first row elided in this view). */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt r4 */
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
/* reduce the size of this table */
/* Result type of shift ops, indexed [value type][shift-amount type]. */
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.  Entries are opcode deltas added to the CIL opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
/*
 * type_from_op:
 *
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 *
 * NOTE(review): many case labels and break statements are elided in this
 * view of the source; the comments below mark the intent of each group.
 */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
	switch (ins->opcode) {
	/* Arithmetic binops: validate via bin_num_table, then specialize via binops_op_map. */
	/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Bitwise binops. */
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Shifts. */
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
	/* Compares: pick the width-specific compare opcode. */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R4)
			ins->opcode = OP_RCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
			ins->opcode = OP_ICOMPARE;
	case OP_ICOMPARE_IMM:
		/* Immediate compare: no src2, so both table indices use src1. */
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
	/* Conditional branches. */
		ins->opcode += beqops_op_map [src1->type];
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
	/* Unary ops. */
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
	/* Conversions to small ints. */
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
	/* conv.r.un: unsigned conversion to floating point. */
		ins->type = STACK_R8;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_R_UN;
			ins->opcode = OP_LCONV_TO_R_UN;
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
	/* conv.u: pointer-width unsigned conversion, width depends on the target. */
		ins->type = STACK_PTR;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_MOVE;
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_FCONV_TO_U;
	/* Conversions to 64-bit. */
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
	/* Conversions to floating point. */
		ins->type = cfg->r4_stack_type;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
	case CEE_CONV_OVF_I:
	case CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
	/* Overflow-checked arithmetic: not defined for floating point. */
	case CEE_ADD_OVF_UN:
	case CEE_MUL_OVF_UN:
	case CEE_SUB_OVF_UN:
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
	/* Memory loads: the result type is fixed by the load opcode. */
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
	case OP_LOADR4_MEMBASE:
		ins->type = cfg->r4_stack_type;
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
	g_error ("opcode 0x%04x not handled in type from op", ins->opcode);

	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
/* Eval-stack types produced by the ldind family of opcodes (array declaration elided in this view). */
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ

param_table [STACK_MAX] [STACK_MAX] = {

/*
 * check_values_to_signature:
 *
 *   Check that the eval-stack values in ARGS are compatible with the
 * parameter types of SIG (return statements elided in this view).
 */
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
	switch (args->type) {
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
			if (!sig->params [i]->byref)
			if (sig->params [i]->byref)
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
			if (sig->params [i]->byref)
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
		/*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;

/* The got_var contains the address of the Global Offset Table when AOT compiling. */
mono_get_got_var (MonoCompile *cfg)
	if (!cfg->compile_aot || !cfg->backend->need_got_var)
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->got_var;

/* Lazily create the variable holding the runtime generic context (only valid when gshared). */
mono_get_vtable_var (MonoCompile *cfg)
	g_assert (cfg->gshared);

	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
	return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType suitable for creating a local variable. */
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R4: return &mono_defaults.single_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
		/* managed pointer: use the pointed-to class' this_arg type */
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
	g_error ("stack type %d to monotype not handled\n", ins->type);

/* Inverse direction: classify MonoType T into an eval-stack type (returns elided in this view). */
static G_GNUC_UNUSED int
type_to_stack_type (MonoCompile *cfg, MonoType *t)
	t = mono_type_get_underlying_type (t);
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return cfg->r4_stack_type;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (t))
	g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class accessed by a ldelem/stelem-style OPCODE
 * (most case labels are elided in this view of the source).
 */
array_access_to_klass (int opcode)
	return mono_defaults.byte_class;
	return mono_defaults.uint16_class;
	return mono_defaults.int_class;
	return mono_defaults.sbyte_class;
	return mono_defaults.int16_class;
	return mono_defaults.int32_class;
	return mono_defaults.uint32_class;
	return mono_defaults.int64_class;
	return mono_defaults.single_class;
	return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	g_assert_not_reached ();
1357 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return (creating on demand) the local variable used to carry the value in
 *   evaluation-stack slot 'slot' with the stack type of 'ins' across basic
 *   block boundaries.  Variables are cached per (stack type, slot) in
 *   cfg->intvars so join points reuse the same local.
 */
1360 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1365 /* inlining can result in deeper stacks */
1366 if (slot >= cfg->header->max_stack)
1367 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per stack type per slot. */
1369 pos = ins->type - 1 + slot * STACK_MAX;
1371 switch (ins->type) {
1378 if ((vnum = cfg->intvars [pos]))
1379 return cfg->varinfo [vnum];
1380 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types (elided cases) get a fresh variable each time. */
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   When compiling AOT, record the (image, token) pair that produced 'key' in
 *   cfg->token_info_hash so the AOT compiler can later re-resolve the item
 *   from metadata alone.
 */
1390 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1393 * Don't use this if a generic_context is set, since that means AOT can't
1394 * look up the method using just the image+token.
1395 * table == 0 means this is a reference made from a wrapper.
1397 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1398 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1399 jump_info_token->image = image;
1400 jump_info_token->token = token;
1401 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1406 * This function is called to handle items that are left on the evaluation stack
1407 * at basic block boundaries. What happens is that we save the values to local variables
1408 * and we reload them later when first entering the target basic block (with the
1409 * handle_loaded_temps () function).
1410 * A single joint point will use the same variables (stored in the array bb->out_stack or
1411 * bb->in_stack, if the basic block is before or after the joint point).
1413 * This function needs to be called _before_ emitting the last instruction of
1414 * the bb (i.e. before emitting a branch).
1415 * If the stack merge fails at a join point, cfg->unverifiable is set.
1418 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1421 MonoBasicBlock *bb = cfg->cbb;
1422 MonoBasicBlock *outb;
1423 MonoInst *inst, **locals;
1428 if (cfg->verbose_level > 3)
1429 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit of this bb: decide which variables will carry the out-stack. */
1430 if (!bb->out_scount) {
1431 bb->out_scount = count;
1432 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an out-block's already-assigned in_stack. */
1434 for (i = 0; i < bb->out_count; ++i) {
1435 outb = bb->out_bb [i];
1436 /* exception handlers are linked, but they should not be considered for stack args */
1437 if (outb->flags & BB_EXCEPTION_HANDLER)
1439 //printf (" %d", outb->block_num);
1440 if (outb->in_stack) {
1442 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh slots. */
1448 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1449 for (i = 0; i < count; ++i) {
1451 * try to reuse temps already allocated for this purpouse, if they occupy the same
1452 * stack slot and if they are of the same type.
1453 * This won't cause conflicts since if 'local' is used to
1454 * store one of the values in the in_stack of a bblock, then
1455 * the same variable will be used for the same outgoing stack
1457 * This doesn't work when inlining methods, since the bblocks
1458 * in the inlined methods do not inherit their in_stack from
1459 * the bblock they are inlined to. See bug #58863 for an
1462 if (cfg->inlined_method)
1463 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1465 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out-stack to successors that don't have one yet;
 * a successor with a mismatched depth makes the method unverifiable. */
1470 for (i = 0; i < bb->out_count; ++i) {
1471 outb = bb->out_bb [i];
1472 /* exception handlers are linked, but they should not be considered for stack args */
1473 if (outb->flags & BB_EXCEPTION_HANDLER)
1475 if (outb->in_scount) {
1476 if (outb->in_scount != bb->out_scount) {
1477 cfg->unverifiable = TRUE;
1480 continue; /* check they are the same locals */
1482 outb->in_scount = count;
1483 outb->in_stack = bb->out_stack;
1486 locals = bb->out_stack;
/* Spill each stack slot into its carrier variable and replace the stack
 * entry with a reference to that variable. */
1488 for (i = 0; i < count; ++i) {
1489 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1490 inst->cil_code = sp [i]->cil_code;
1491 sp [i] = locals [i];
1492 if (cfg->verbose_level > 3)
1493 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1497 * It is possible that the out bblocks already have in_stack assigned, and
1498 * the in_stacks differ. In this case, we will store to all the different
1505 /* Find a bblock which has a different in_stack */
1507 while (bindex < bb->out_count) {
1508 outb = bb->out_bb [bindex];
1509 /* exception handlers are linked, but they should not be considered for stack args */
1510 if (outb->flags & BB_EXCEPTION_HANDLER) {
1514 if (outb->in_stack != locals) {
1515 for (i = 0; i < count; ++i) {
1516 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1517 inst->cil_code = sp [i]->cil_code;
1518 sp [i] = locals [i];
1519 if (cfg->verbose_level > 3)
1520 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1522 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into 'intf_bit_reg' a nonzero value iff the interface
 *   bitmap found at [base_reg + offset] has the bit for klass's interface id
 *   set.  Three code paths: a JIT icall when the bitmap is compressed, an
 *   AOT-safe computed-bit sequence (interface id not known at compile time),
 *   and a direct byte-load + immediate-mask when the id is a JIT constant.
 */
1532 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1534 int ibitmap_reg = alloc_preg (cfg);
1535 #ifdef COMPRESSED_INTERFACE_BITMAP
1537 MonoInst *res, *ins;
1538 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1539 MONO_ADD_INS (cfg->cbb, ins);
1541 if (cfg->compile_aot)
1542 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1544 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmap: defer the test to a runtime helper. */
1545 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1548 int ibitmap_byte_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1552 if (cfg->compile_aot) {
1553 int iid_reg = alloc_preg (cfg);
1554 int shifted_iid_reg = alloc_preg (cfg);
1555 int ibitmap_byte_address_reg = alloc_preg (cfg);
1556 int masked_iid_reg = alloc_preg (cfg);
1557 int iid_one_bit_reg = alloc_preg (cfg);
1558 int iid_bit_reg = alloc_preg (cfg);
/* AOT: the interface id is patched in at load time, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1564 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface id known now; byte offset and mask are immediates. */
1568 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1575 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1576 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the interface bitmap lives at MonoClass.interface_bitmap. */
1579 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1581 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1585 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1586 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the interface bitmap lives at MonoVTable.interface_bitmap. */
1589 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1591 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1595 * Emit code which checks whenever the interface id of @klass is smaller than
1596 * than the value given by max_iid_reg.
/*
 * On failure either branches to false_target (when given) or throws
 * InvalidCastException.  Uses an AOT constant for the interface id when
 * compiling AOT, an immediate otherwise.
 */
1599 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1600 MonoBasicBlock *false_target)
1602 if (cfg->compile_aot) {
1603 int iid_reg = alloc_preg (cfg);
1604 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned below-than: max_iid < iid means klass cannot be implemented. */
1610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1615 /* Same as above, but obtains max_iid from a vtable */
1617 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1618 MonoBasicBlock *false_target)
1620 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1623 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1626 /* Same as above, but obtains max_iid from a klass */
1628 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1629 MonoBasicBlock *false_target)
1631 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1633 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1634 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subclass test: walk klass's supertypes table in the
 *   candidate class (in klass_reg) and branch to true_target on a match,
 *   false_target otherwise.  The class to compare against comes from
 *   klass_ins (when non-NULL), an AOT class constant, or an immediate.
 */
1638 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1640 int idepth_reg = alloc_preg (cfg);
1641 int stypes_reg = alloc_preg (cfg);
1642 int stype = alloc_preg (cfg);
1644 mono_class_setup_supertypes (klass);
/* Only check idepth when it can exceed the statically-sized supertable. */
1646 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes[klass->idepth - 1] and compare it with klass. */
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1655 } else if (cfg->compile_aot) {
1656 int const_reg = alloc_preg (cfg);
1657 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit class instruction. */
1666 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in vtable_reg: first the
 *   max-interface-id range check, then the interface bitmap bit test.  On a
 *   set bit branches to true_target (when given); otherwise falls through to
 *   an InvalidCastException.
 */
1672 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int intf_reg = alloc_preg (cfg);
1676 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1677 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1682 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1686 * Variant of the above that takes a register to the class, not the vtable.
1689 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1691 int intf_bit_reg = alloc_preg (cfg);
1693 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1694 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Bit set -> interface implemented; otherwise raise InvalidCastException. */
1697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1699 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check between the class in klass_reg and
 *   'klass' (taken from klass_inst when non-NULL, an AOT class constant when
 *   compiling AOT, or an immediate), throwing InvalidCastException on
 *   mismatch.
 */
1703 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1706 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1707 } else if (cfg->compile_aot) {
1708 int const_reg = alloc_preg (cfg);
1709 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a class instruction. */
1718 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1720 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare the class in klass_reg against 'klass' (AOT constant or
 *   immediate) and emit a conditional branch with 'branch_op' to 'target'
 *   instead of throwing.
 */
1724 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1726 if (cfg->compile_aot) {
1727 int const_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for array element types. */
1737 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in klass_reg against 'klass',
 *   throwing InvalidCastException on failure.  Arrays are handled by
 *   checking rank and then the element (cast_class), with special cases
 *   around enums and System.Object elements; non-array classes fall through
 *   to a supertypes-table check (the branch structure between the two parts
 *   is partially elided in this view).
 */
1740 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1743 int rank_reg = alloc_preg (cfg);
1744 int eclass_reg = alloc_preg (cfg);
1746 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1750 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1752 if (klass->cast_class == mono_defaults.object_class) {
1753 int parent_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1755 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1756 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1757 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1758 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1759 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1760 } else if (klass->cast_class == mono_defaults.enum_class) {
1761 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1762 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1763 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1765 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1766 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1769 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1770 /* Check that the object is a vector too */
1771 int bounds_reg = alloc_preg (cfg);
/* SZARRAYs have a NULL bounds pointer; a non-NULL one means a
 * multi-dimensional array, which must not cast to a vector. */
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: check klass is in the candidate's supertypes table. */
1777 int idepth_reg = alloc_preg (cfg);
1778 int stypes_reg = alloc_preg (cfg);
1779 int stype = alloc_preg (cfg);
1781 mono_class_setup_supertypes (klass);
1783 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1786 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1790 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a class instruction. */
1795 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1797 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that zeroes 'size' bytes at [destreg + offset].  Only
 *   val == 0 is supported (asserted).  Small aligned sizes use a single
 *   store-immediate; larger sizes load 0 into a register and emit a sequence
 *   of register stores, widest first, honoring 'align' and the backend's
 *   unaligned-access capability.  (Loop/offset bookkeeping lines are elided
 *   in this view.)
 */
1801 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1805 g_assert (val == 0);
/* Fast path: one store-immediate when the size fits a register and the
 * alignment permits it. */
1810 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1816 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1821 #if SIZEOF_REGISTER == 8
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize 0 in a register once, then store repeatedly. */
1829 val_reg = alloc_preg (cfg);
1831 if (SIZEOF_REGISTER == 8)
1832 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1834 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1837 /* This could be optimized further if neccesary */
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1846 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying 'size' bytes from [srcreg + soffset] to
 *   [destreg + doffset], widest loads/stores first, honoring 'align' and the
 *   backend's unaligned-access capability.  Size is capped (assert) to avoid
 *   unbounded code expansion.  (Loop/offset bookkeeping lines are elided in
 *   this view.)
 */
1877 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1884 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1885 g_assert (size < 10000);
/* Unaligned data: byte-by-byte copy. */
1888 /* This could be optimized further if neccesary */
1890 cur_reg = alloc_preg (cfg);
1891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks when the backend tolerates unaligned 64-bit access. */
1899 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1901 cur_reg = alloc_preg (cfg);
1902 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1911 cur_reg = alloc_preg (cfg);
1912 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing the value in sreg1 into the TLS slot identified by
 *   tls_key.  Under AOT the slot offset is loaded from a patched constant
 *   (OP_TLS_SET_REG); under JIT the offset is resolved now and baked into an
 *   OP_TLS_SET instruction.
 */
1937 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1941 if (cfg->compile_aot) {
1942 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1943 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1945 ins->sreg2 = c->dreg;
1946 MONO_ADD_INS (cfg->cbb, ins);
1948 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1950 ins->inst_offset = mini_get_tls_offset (tls_key);
1951 MONO_ADD_INS (cfg->cbb, ins);
1958 * Emit IR to push the current LMF onto the LMF stack.
1961 emit_push_lmf (MonoCompile *cfg)
1964 * Emit IR to push the LMF:
1965 * lmf_addr = <lmf_addr from tls>
1966 * lmf->lmf_addr = lmf_addr
1967 * lmf->prev_lmf = *lmf_addr
1970 int lmf_reg, prev_lmf_reg;
1971 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS — link the frame through the TLS
 * slot directly instead of going through lmf_addr. */
1976 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1977 /* Load current lmf */
1978 lmf_ins = mono_get_lmf_intrinsic (cfg);
1980 MONO_ADD_INS (cfg->cbb, lmf_ins);
1981 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1982 lmf_reg = ins->dreg;
1983 /* Save previous_lmf */
1984 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
1986 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1989 * Store lmf_addr in a variable, so it can be allocated to a global register.
1991 if (!cfg->lmf_addr_var)
1992 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Platform-dependent ways of obtaining lmf_addr: jit_tls intrinsic +
 * offset, lmf_addr intrinsic, inlined pthread_getspecific, or a plain
 * icall to mono_get_lmf_addr (the #if/#else structure is elided here). */
1995 ins = mono_get_jit_tls_intrinsic (cfg);
1997 int jit_tls_dreg = ins->dreg;
1999 MONO_ADD_INS (cfg->cbb, ins);
2000 lmf_reg = alloc_preg (cfg);
2001 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2003 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2006 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2008 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 MonoInst *args [16], *jit_tls_ins, *ins;
2013 /* Inline mono_get_lmf_addr () */
2014 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2016 /* Load mono_jit_tls_id */
2017 if (cfg->compile_aot)
2018 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2020 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2021 /* call pthread_getspecific () */
2022 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2023 /* lmf_addr = &jit_tls->lmf */
2024 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2027 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2031 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2033 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2034 lmf_reg = ins->dreg;
2036 prev_lmf_reg = alloc_preg (cfg);
2037 /* Save previous_lmf */
2038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2039 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = &lmf */
2041 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2048 * Emit IR to pop the current LMF from the LMF stack.
2051 emit_pop_lmf (MonoCompile *cfg)
2053 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2059 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2060 lmf_reg = ins->dreg;
/* Fast path (mirrors emit_push_lmf): the LMF head lives directly in TLS. */
2062 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2063 /* Load previous_lmf */
2064 prev_lmf_reg = alloc_preg (cfg);
2065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
2067 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2070 * Emit IR to pop the LMF:
2071 * *(lmf->lmf_addr) = lmf->prev_lmf
2073 /* This could be called before emit_push_lmf () */
2074 if (!cfg->lmf_addr_var)
2075 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2076 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2078 prev_lmf_reg = alloc_preg (cfg);
2079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave callback invocation ('func') taking the
 *   current method as its single argument.  Skipped for inlined methods and
 *   when enter/leave profiling is not enabled.
 */
2085 emit_instrumentation_call (MonoCompile *cfg, void *func)
2087 MonoInst *iargs [1];
2090 * Avoid instrumenting inlined methods since it can
2091 * distort profiling results.
2093 if (cfg->method != cfg->current_method)
2096 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2097 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2098 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the IR call opcode for a call returning 'type':
 *   {VOID,I,L,R,F,V}CALL crossed with plain / _REG (calli) / _MEMBASE
 *   (virtual) variants.  Several case labels are elided in this view; enums
 *   and generic insts loop back by rewriting 'type' to the underlying type.
 */
2103 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2106 type = mini_get_underlying_type (type);
2107 switch (type->type) {
2108 case MONO_TYPE_VOID:
2109 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2116 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2120 case MONO_TYPE_FNPTR:
2121 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2122 case MONO_TYPE_CLASS:
2123 case MONO_TYPE_STRING:
2124 case MONO_TYPE_OBJECT:
2125 case MONO_TYPE_SZARRAY:
2126 case MONO_TYPE_ARRAY:
2127 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
/* R4 has its own RCALL family (elided condition decides R vs F). */
2133 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2135 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2137 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2138 case MONO_TYPE_VALUETYPE:
2139 if (type->data.klass->enumtype) {
2140 type = mono_class_enum_basetype (type->data.klass);
2143 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2144 case MONO_TYPE_TYPEDBYREF:
2145 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2146 case MONO_TYPE_GENERICINST:
2147 type = &type->data.generic_class->container_class->byval_arg;
2150 case MONO_TYPE_MVAR:
/* gsharedvt type variables are treated as vtypes here. */
2152 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2160 * target_type_is_incompatible:
2161 * @cfg: MonoCompile context
2163 * Check that the item @arg on the evaluation stack can be stored
2164 * in the target type (can be a local, or field, etc).
2165 * The cfg arg can be used to check if we need verification or just
2168 * Returns: non-0 value if arg can't be stored on a target.
2171 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2173 MonoType *simple_type;
/* Byref targets: accept managed pointers of the matching class, and
 * STACK_PTR (early-return lines between the checks are elided here). */
2176 if (target->byref) {
2177 /* FIXME: check that the pointed to types match */
2178 if (arg->type == STACK_MP)
2179 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2180 if (arg->type == STACK_PTR)
2185 simple_type = mini_get_underlying_type (target);
2186 switch (simple_type->type) {
2187 case MONO_TYPE_VOID:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2228 if (arg->type != cfg->r4_stack_type)
2232 if (arg->type != STACK_R8)
/* Valuetypes additionally require the exact klass to match. */
2235 case MONO_TYPE_VALUETYPE:
2236 if (arg->type != STACK_VTYPE)
2238 klass = mono_class_from_mono_type (simple_type);
2239 if (klass != arg->klass)
2242 case MONO_TYPE_TYPEDBYREF:
2243 if (arg->type != STACK_VTYPE)
2245 klass = mono_class_from_mono_type (simple_type);
2246 if (klass != arg->klass)
2249 case MONO_TYPE_GENERICINST:
2250 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 /* The second cases is needed when doing partial sharing */
2255 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2259 if (arg->type != STACK_OBJ)
2261 /* FIXME: check type compatibility */
/* Type variables only appear here under generic sharing (asserted). */
2265 case MONO_TYPE_MVAR:
2266 g_assert (cfg->gshared);
2267 if (mini_type_var_is_vt (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2271 if (arg->type != STACK_OBJ)
2276 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2282 * Prepare arguments for passing to a function call.
2283 * Return a non-zero value if the arguments can't be passed to the given
2285 * The type checks are not yet complete and some conversions may need
2286 * casts on 32 or 64 bit architectures.
2288 * FIXME: implement this using target_type_is_incompatible ()
/* Validates each stack entry in 'args' against the corresponding parameter
 * in 'sig' (plus the implicit 'this' for hasthis signatures).  Returns
 * non-zero on mismatch; the individual failure returns are elided here. */
2291 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2293 MonoType *simple_type;
/* Implicit 'this' must be an object, managed pointer, or native pointer. */
2297 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2301 for (i = 0; i < sig->param_count; ++i) {
2302 if (sig->params [i]->byref) {
2303 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2307 simple_type = mini_get_underlying_type (sig->params [i]);
2309 switch (simple_type->type) {
2310 case MONO_TYPE_VOID:
2319 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2325 case MONO_TYPE_FNPTR:
2326 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2329 case MONO_TYPE_CLASS:
2330 case MONO_TYPE_STRING:
2331 case MONO_TYPE_OBJECT:
2332 case MONO_TYPE_SZARRAY:
2333 case MONO_TYPE_ARRAY:
2334 if (args [i]->type != STACK_OBJ)
2339 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != cfg->r4_stack_type)
2347 if (args [i]->type != STACK_R8)
2350 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying base type. */
2351 if (simple_type->data.klass->enumtype) {
2352 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_TYPEDBYREF:
2359 if (args [i]->type != STACK_VTYPE)
2362 case MONO_TYPE_GENERICINST:
2363 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2366 case MONO_TYPE_MVAR:
2368 if (args [i]->type != STACK_VTYPE)
2372 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual call) opcode to its direct-call counterpart.
 *   Return statements for each case are elided in this view; unknown opcodes
 *   assert.
 */
2380 callvirt_to_call (int opcode)
2383 case OP_CALL_MEMBASE:
2385 case OP_VOIDCALL_MEMBASE:
2387 case OP_FCALL_MEMBASE:
2389 case OP_RCALL_MEMBASE:
2391 case OP_VCALL_MEMBASE:
2393 case OP_LCALL_MEMBASE:
2396 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Map a *CALL_MEMBASE (virtual call) opcode to its indirect (*CALL_REG)
 *   counterpart, used when the target address is in a register.
 */
2403 callvirt_to_call_reg (int opcode)
2406 case OP_CALL_MEMBASE:
2408 case OP_VOIDCALL_MEMBASE:
2409 return OP_VOIDCALL_REG;
2410 case OP_FCALL_MEMBASE:
2411 return OP_FCALL_REG;
2412 case OP_RCALL_MEMBASE:
2413 return OP_RCALL_REG;
2414 case OP_VCALL_MEMBASE:
2415 return OP_VCALL_REG;
2416 case OP_LCALL_MEMBASE:
2417 return OP_LCALL_REG;
2419 g_assert_not_reached ();
2425 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either a copy of imt_arg or a method
 *   constant) into a register and attach it to 'call'.  LLVM-compiled code
 *   records it in call->imt_arg_reg; otherwise it is pinned to the
 *   architecture's dedicated MONO_ARCH_IMT_REG.  The two branches mirror
 *   each other (the #if/#else structure is elided in this view).
 */
2427 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2431 if (COMPILE_LLVM (cfg)) {
2432 method_reg = alloc_preg (cfg);
2435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2436 } else if (cfg->compile_aot) {
2437 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2440 MONO_INST_NEW (cfg, ins, OP_PCONST);
2441 ins->inst_p0 = method;
2442 ins->dreg = method_reg;
2443 MONO_ADD_INS (cfg->cbb, ins);
2447 call->imt_arg_reg = method_reg;
2449 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant materialization, register-pinned only. */
2453 method_reg = alloc_preg (cfg);
2456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2457 } else if (cfg->compile_aot) {
2458 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2461 MONO_INST_NEW (cfg, ins, OP_PCONST);
2462 ins->inst_p0 = method;
2463 ins->dreg = method_reg;
2464 MONO_ADD_INS (cfg->cbb, ins);
2467 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from mempool 'mp' and fill in its target (the
 *   ip/type field assignments are elided in this view).
 */
2470 static MonoJumpInfo *
2471 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2473 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2477 ji->data.target = target;
/* Returns the generic context usage of 'klass'; only meaningful under
 * generic sharing (the guarding condition is elided in this view). */
2483 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2486 return mono_class_check_context_used (klass);
/* Returns the generic context usage of 'method'; only meaningful under
 * generic sharing (the guarding condition is elided in this view). */
2492 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2495 return mono_method_check_context_used (method);
2501 * check_method_sharing:
2503 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (either may be NULL): *out_pass_vtable — pass the class vtable
 * (static or valuetype methods on generic classes that can be shared);
 * *out_pass_mrgctx — pass a method RGCTX (methods with their own method
 * instantiation).  The two are mutually exclusive (asserted).
 */
2506 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2508 gboolean pass_vtable = FALSE;
2509 gboolean pass_mrgctx = FALSE;
2511 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2512 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2513 gboolean sharable = FALSE;
2515 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2519 * Pass vtable iff target method might
2520 * be shared, which means that sharing
2521 * is enabled for its class and its
2522 * context is sharable (and it's not a
2525 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) take an mrgctx instead. */
2529 if (mini_method_get_context (cmethod) &&
2530 mini_method_get_context (cmethod)->method_inst) {
2531 g_assert (!pass_vtable);
2533 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2536 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2541 if (out_pass_vtable)
2542 *out_pass_vtable = pass_vtable;
2543 if (out_pass_mrgctx)
2544 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * choosing the opcode from the call shape (CALLI/VIRTUAL/TAIL) and the
 * return type. Handles vtype returns (vret var or OP_OUTARG_VTRETADDR),
 * soft-float r8->r4 argument conversion, and lets the backend/LLVM lower
 * the argument passing. Does NOT add the call to a basic block — callers do.
 */
2547 inline static MonoCallInst *
2548 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2549 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2553 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method for good, so the leave-profiler event must
 * be emitted before the call. */
2561 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2565 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2568 call->signature = sig;
2569 call->rgctx_reg = rgctx;
2570 sig_ret = mini_get_underlying_type (sig->ret);
2572 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return, variant 1: reuse the caller's vret_addr.
 * NOTE(review): the condition distinguishing the two vtype branches is in
 * elided lines — confirm which one checks cfg->vret_addr. */
2575 if (mini_type_is_vtype (sig_ret)) {
2576 call->vret_var = cfg->vret_addr;
2577 //g_assert_not_reached ();
/* Valuetype return, variant 2: allocate a temp and pass its address. */
2579 } else if (mini_type_is_vtype (sig_ret)) {
2580 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2583 temp->backend.is_pinvoke = sig->pinvoke;
2586 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2587 * address of return value to increase optimization opportunities.
2588 * Before vtype decomposition, the dreg of the call ins itself represents the
2589 * fact the call modifies the return value. After decomposition, the call will
2590 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2591 * will be transformed into an LDADDR.
2593 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2594 loada->dreg = alloc_preg (cfg);
2595 loada->inst_p0 = temp;
2596 /* We reference the call too since call->dreg could change during optimization */
2597 loada->inst_p1 = call;
2598 MONO_ADD_INS (cfg->cbb, loada);
2600 call->inst.dreg = temp->dreg;
2602 call->vret_var = loada;
/* Scalar non-void return: just give the call a destination register. */
2603 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2604 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2606 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2607 if (COMPILE_SOFT_FLOAT (cfg)) {
2609 * If the call has a float argument, we would need to do an r8->r4 conversion using
2610 * an icall, but that cannot be done during the call sequence since it would clobber
2611 * the call registers + the stack. So we do it before emitting the call.
2613 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2615 MonoInst *in = call->args [i];
/* Index 0 is 'this' when hasthis; parameter types start after it. */
2617 if (i >= sig->hasthis)
2618 t = sig->params [i - sig->hasthis];
2620 t = &mono_defaults.int_class->byval_arg;
2621 t = mono_type_get_underlying_type (t);
2623 if (!t->byref && t->type == MONO_TYPE_R4) {
2624 MonoInst *iargs [1];
2628 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2630 /* The result will be in an int vreg */
2631 call->args [i] = conv;
2637 call->need_unbox_trampoline = unbox_trampoline;
/* Argument lowering: LLVM builds its own call sequence, otherwise the
 * native backend emits the outgoing-argument moves. */
2640 if (COMPILE_LLVM (cfg))
2641 mono_llvm_emit_call (cfg, call);
2643 mono_arch_emit_call (cfg, call);
2645 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2648 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2649 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Route the runtime-generic-context value in RGCTX_REG into
 * MONO_ARCH_RGCTX_REG for CALL, and mark both the cfg and the call as
 * using an rgctx. rgctx_arg_reg is recorded for LLVM, which cannot use
 * the fixed register directly (RGCTX_ARG is unused here — presumably kept
 * for signature symmetry; confirm).
 */
2655 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2657 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2658 cfg->uses_rgctx_reg = TRUE;
2659 call->rgctx_reg = TRUE;
2661 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and args ARGS.
 * Optionally passes an IMT argument and an rgctx argument. When
 * check_pinvoke_callconv is enabled and this is a pinvoke wrapper, the
 * stack pointer is captured before the call and compared after it,
 * throwing ExecutionEngineException on a calling-convention mismatch.
 * Returns the call instruction (cast to MonoInst*).
 */
2665 inline static MonoInst*
2666 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2671 gboolean check_sp = FALSE;
2673 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2674 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2676 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value out before emitting args, so arg lowering cannot
 * clobber it. */
2681 rgctx_reg = mono_alloc_preg (cfg);
2682 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2686 if (!cfg->stack_inbalance_var)
2687 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the post-call comparison. */
2689 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2690 ins->dreg = cfg->stack_inbalance_var->dreg;
2691 MONO_ADD_INS (cfg->cbb, ins);
2694 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* Indirect call: the callee address lives in sreg1. */
2696 call->inst.sreg1 = addr->dreg;
2699 emit_imt_argument (cfg, call, NULL, imt_arg);
2701 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2706 sp_reg = mono_alloc_preg (cfg);
2708 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2710 MONO_ADD_INS (cfg->cbb, ins);
2712 /* Restore the stack so we don't crash when throwing the exception */
2713 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2714 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
/* SP after the call must equal SP before it; otherwise the native callee
 * used a different calling convention. */
2717 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
2728 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2731 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2733 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2736 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2737 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2739 #ifndef DISABLE_REMOTING
2740 gboolean might_be_remote = FALSE;
2742 gboolean virtual = this_ins != NULL;
2743 gboolean enable_for_aot = TRUE;
2746 MonoInst *call_target = NULL;
2748 gboolean need_unbox_trampoline;
2751 sig = mono_method_signature (method);
2753 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2754 MonoInst *icall_args [16];
2757 // FIXME: Optimize this
2759 guint32 imt_slot = mono_method_get_imt_slot (method);
2761 icall_args [0] = this_ins;
2762 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2764 icall_args [2] = imt_arg;
2766 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2767 icall_args [2] = ins;
2769 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2771 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2775 rgctx_reg = mono_alloc_preg (cfg);
2776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2779 if (method->string_ctor) {
2780 /* Create the real signature */
2781 /* FIXME: Cache these */
2782 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2783 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2788 context_used = mini_method_check_context_used (cfg, method);
2790 #ifndef DISABLE_REMOTING
2791 might_be_remote = this_ins && sig->hasthis &&
2792 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2793 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2795 if (might_be_remote && context_used) {
2798 g_assert (cfg->gshared);
2800 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2802 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2806 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2807 // FIXME: Vcall optimizations below
2808 MonoInst *icall_args [16];
2811 if (sig->generic_param_count) {
2813 * Generic virtual call, pass the concrete method as the imt argument.
2815 imt_arg = emit_get_rgctx_method (cfg, context_used,
2816 method, MONO_RGCTX_INFO_METHOD);
2819 // FIXME: Optimize this
2821 int slot = mono_method_get_vtable_index (method);
2823 icall_args [0] = this_ins;
2824 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2826 icall_args [2] = imt_arg;
2828 EMIT_NEW_PCONST (cfg, ins, NULL);
2829 icall_args [2] = ins;
2831 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2834 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2836 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2838 #ifndef DISABLE_REMOTING
2839 if (might_be_remote)
2840 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2843 call->method = method;
2844 call->inst.flags |= MONO_INST_HAS_METHOD;
2845 call->inst.inst_left = this_ins;
2846 call->tail_call = tail;
2849 int vtable_reg, slot_reg, this_reg;
2852 this_reg = this_ins->dreg;
2854 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2855 MonoInst *dummy_use;
2857 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2859 /* Make a call to delegate->invoke_impl */
2860 call->inst.inst_basereg = this_reg;
2861 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2862 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2864 /* We must emit a dummy use here because the delegate trampoline will
2865 replace the 'this' argument with the delegate target making this activation
2866 no longer a root for the delegate.
2867 This is an issue for delegates that target collectible code such as dynamic
2868 methods of GC'able assemblies.
2870 For a test case look into #667921.
2872 FIXME: a dummy use is not the best way to do it as the local register allocator
2873 will put it on a caller save register and spil it around the call.
2874 Ideally, we would either put it on a callee save register or only do the store part.
2876 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2878 return (MonoInst*)call;
2881 if ((!cfg->compile_aot || enable_for_aot) &&
2882 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2883 (MONO_METHOD_IS_FINAL (method) &&
2884 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2885 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2887 * the method is not virtual, we just need to ensure this is not null
2888 * and then we can call the method directly.
2890 #ifndef DISABLE_REMOTING
2891 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2893 * The check above ensures method is not gshared, this is needed since
2894 * gshared methods can't have wrappers.
2896 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2900 if (!method->string_ctor)
2901 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2903 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2904 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2906 * the method is virtual, but we can statically dispatch since either
2907 * it's class or the method itself are sealed.
2908 * But first we need to ensure it's not a null reference.
2910 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2912 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2913 } else if (call_target) {
2914 vtable_reg = alloc_preg (cfg);
2915 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2917 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2918 call->inst.sreg1 = call_target->dreg;
2919 call->inst.flags &= !MONO_INST_HAS_METHOD;
2921 vtable_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2923 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2924 guint32 imt_slot = mono_method_get_imt_slot (method);
2925 emit_imt_argument (cfg, call, call->method, imt_arg);
2926 slot_reg = vtable_reg;
2927 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->is_virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no IMT or rgctx argument.
 */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native address FUNC with signature SIG.
 * NOTE(review): the line storing FUNC on the call (call->fptr, presumably)
 * is elided in this listing — confirm.
 */
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C entry point is FUNC, going through
 * the icall's managed wrapper (which provides LMF push/exception handling).
 */
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is passed as the "address"; the PATCH_INFO_ABS
 * machinery resolves it at patch time via the abs_patches hash. */
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* (comment continues in elided lines) */
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes don't treat it as a real address. */
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be invoked directly (without their managed wrapper).
 * Disabled under LLVM on this configuration, when emitting sdb sequence
 * points (stack walks need the wrapper frame), or when explicitly disabled.
 */
3009 direct_icalls_enabled (MonoCompile *cfg)
3011 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3013 if (cfg->compile_llvm)
3016 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO, inlining its wrapper when
 * the icall cannot raise and direct icalls are enabled; otherwise fall back
 * to calling through the wrapper.
 */
3022 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3025 * Call the jit icall without a wrapper if possible.
3026 * The wrapper is needed for the following reasons:
3027 * - to handle exceptions thrown using mono_raise_exceptions () from the
3028 * icall function. The EH code needs the lmf frame pushed by the
3029 * wrapper to be able to unwind back to managed code.
3030 * - to be able to do stack walks for asynchronously suspended
3031 * threads when debugging.
3033 if (info->no_raise && direct_icalls_enabled (cfg)) {
3037 if (!info->wrapper_method) {
/* Lazily create (and publish with a barrier) the icall wrapper. */
3038 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3039 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Ensure other threads see a fully-initialized wrapper_method. */
3041 mono_memory_barrier ();
3045 * Inline the wrapper method, which is basically a call to the C icall, and
3046 * an exception check.
3048 costs = inline_method (cfg, info->wrapper_method, NULL,
3049 args, NULL, cfg->real_offset, TRUE);
3050 g_assert (costs > 0);
3051 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3055 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend a small integer call result for pinvoke (or LLVM)
 * calls, since native code may leave the upper bits of sub-register-sized
 * returns uninitialized. Returns the (possibly replaced) result ins.
 */
3060 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3062 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3063 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3067 * Native code might return non register sized integers
3068 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
3070 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3071 case OP_LOADI1_MEMBASE:
3072 widen_op = OP_ICONV_TO_I1;
3074 case OP_LOADU1_MEMBASE:
3075 widen_op = OP_ICONV_TO_U1;
3077 case OP_LOADI2_MEMBASE:
3078 widen_op = OP_ICONV_TO_I2;
3080 case OP_LOADU2_MEMBASE:
3081 widen_op = OP_ICONV_TO_U2;
3087 if (widen_op != -1) {
3088 int dreg = alloc_preg (cfg);
3091 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Keep the original eval-stack type on the widened value. */
3092 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the corlib String.memcpy(dest,src,size)
 * helper. Aborts if the method is missing (old corlib).
 */
3102 get_memcpy_method (void)
3104 static MonoMethod *memcpy_method = NULL;
3105 if (!memcpy_method) {
3106 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3108 g_error ("Old corlib found. Install a new one");
3110 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build in *WB_BITMAP a bitmask with one bit per pointer-sized slot of
 * KLASS (at byte OFFSET within the enclosing object) marking which slots
 * hold managed references, recursing into embedded value types. Used to
 * drive mono_gc_wbarrier_value_copy_bitmap. Caller must ensure the struct
 * fits in the bitmap (size checked by the caller).
 */
3114 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3116 MonoClassField *field;
3117 gpointer iter = NULL;
3119 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
3122 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header. */
3124 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3125 if (mini_type_is_reference (mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the per-slot bitmap. */
3126 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3127 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3129 MonoClass *field_class = mono_class_from_mono_type (field->type);
3130 if (field_class->has_references)
3131 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Picks, in order:
 * the backend's dedicated OP_CARD_TABLE_WBARRIER, an inline card-table
 * mark (shift, mask, store 1 into the card byte), or a call to the generic
 * GC write-barrier method. No-op if write barriers are disabled.
 */
3137 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3139 int card_table_shift_bits;
3140 gpointer card_table_mask;
3142 MonoInst *dummy_use;
3143 int nursery_shift_bits;
3144 size_t nursery_size;
3146 if (!cfg->gen_write_barriers)
3149 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3151 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fast path: backend implements the card-table barrier as one opcode. */
3153 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3156 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3157 wbarrier->sreg1 = ptr->dreg;
3158 wbarrier->sreg2 = value->dreg;
3159 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card index = ptr >> shift (optionally masked). */
3160 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3161 int offset_reg = alloc_preg (cfg);
3162 int card_reg = alloc_preg (cfg);
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3166 if (card_table_mask)
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3169 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3170 * IMM's larger than 32bits.
3172 if (cfg->compile_aot) {
3173 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3175 MONO_INST_NEW (cfg, ins, OP_PCONST);
3176 ins->inst_p0 = card_table;
3177 ins->dreg = card_reg;
3178 MONO_ADD_INS (cfg->cbb, ins);
3181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty. */
3182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC's generic write-barrier method. */
3184 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3185 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for liveness purposes. */
3188 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0] (alignment ALIGN). Large copies go through the
 * mono_gc_wbarrier_value_copy_bitmap icall with a per-slot reference
 * bitmap; small copies are unrolled as pointer-sized load/store pairs with
 * barriers on the reference slots, plus 4/2/1-byte tail copies.
 * NOTE(review): return statements are in elided lines — presumably returns
 * a gboolean "handled" flag, FALSE when alignment/size preconditions fail.
 */
3192 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3194 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3195 unsigned need_wb = 0;
3200 /*types with references can't have alignment smaller than sizeof(void*) */
3201 if (align < SIZEOF_VOID_P)
3204 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3205 if (size > 32 * SIZEOF_VOID_P)
3208 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3210 /* We don't unroll more than 5 stores to avoid code bloat. */
3211 if (size > 5 * SIZEOF_VOID_P) {
3212 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer slots for the icall. */
3213 size += (SIZEOF_VOID_P - 1);
3214 size &= ~(SIZEOF_VOID_P - 1);
3216 EMIT_NEW_ICONST (cfg, iargs [2], size);
3217 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3218 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3222 destreg = iargs [0]->dreg;
3223 srcreg = iargs [1]->dreg;
3226 dest_ptr_reg = alloc_preg (cfg);
3227 tmp_reg = alloc_preg (cfg);
/* Walk a running destination pointer so barriers see the slot address. */
3230 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3232 while (size >= SIZEOF_VOID_P) {
3233 MonoInst *load_inst;
3234 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3235 load_inst->dreg = tmp_reg;
3236 load_inst->inst_basereg = srcreg;
3237 load_inst->inst_offset = offset;
3238 MONO_ADD_INS (cfg->cbb, load_inst);
3240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only on slots the bitmap marked as references (guard elided). */
3243 emit_write_barrier (cfg, iargs [0], load_inst);
3245 offset += SIZEOF_VOID_P;
3246 size -= SIZEOF_VOID_P;
3249 /*tmp += sizeof (void*)*/
3250 if (size >= SIZEOF_VOID_P) {
3251 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3252 MONO_ADD_INS (cfg->cbb, iargs [0]);
3256 /* Those cannot be references since size < sizeof (void*) */
3258 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3282 * Emit code to copy a valuetype of type @klass whose address is stored in
3283 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects native (marshalled) size/layout; in that case the struct
 * must contain no references. Dispatch: gsharedvt memcpy, inline unrolled
 * copy, GC-aware value_copy icall, inline memcpy, or corlib memcpy call. */
3286 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3288 MonoInst *iargs [4];
3291 MonoMethod *memcpy_method;
3292 MonoInst *size_ins = NULL;
3293 MonoInst *memcpy_ins = NULL;
3297 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3300 * This check breaks with spilled vars... need to handle it during verification anyway.
3301 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine come from the rgctx at runtime. */
3304 if (mini_is_gsharedvt_klass (klass)) {
3306 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3307 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3311 n = mono_class_native_size (klass, &align);
3313 n = mono_class_value_size (klass, &align);
3315 /* if native is true there should be no references in the struct */
3316 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3317 /* Avoid barriers when storing to the stack */
3318 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3319 (dest->opcode == OP_LDADDR))) {
3325 context_used = mini_class_check_context_used (cfg, klass);
3327 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3328 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Shared code: the klass argument for value_copy comes from the rgctx. */
3330 } else if (context_used) {
3331 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3333 if (cfg->compile_aot) {
3334 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3336 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT case: precompute the GC descriptor so the icall doesn't have to. */
3337 mono_class_compute_gc_descriptor (klass);
3342 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3344 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native copy): a raw memcpy is sufficient. */
3349 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3350 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3351 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3356 iargs [2] = size_ins;
3358 EMIT_NEW_ICONST (cfg, iargs [2], n);
3360 memcpy_method = get_memcpy_method ();
/* gsharedvt calls the rgctx-provided memcpy indirectly. */
3362 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3364 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the corlib String.memset(ptr,val,size)
 * helper. Aborts if the method is missing (old corlib).
 */
3369 get_memset_method (void)
3371 static MonoMethod *memset_method = NULL;
3372 if (!memset_method) {
3373 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3375 g_error ("Old corlib found. Install a new one");
3377 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 * in DEST. gsharedvt types call an rgctx-provided bzero with a runtime
 * size; small types are zeroed inline; the rest call corlib memset.
 * IP is unused here — presumably kept for error reporting; confirm.
 */
3381 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3383 MonoInst *iargs [3];
3386 MonoMethod *memset_method;
3387 MonoInst *size_ins = NULL;
3388 MonoInst *bzero_ins = NULL;
3389 static MonoMethod *bzero_method;
3391 /* FIXME: Optimize this for the case when dest is an LDADDR */
3392 mono_class_init (klass);
3393 if (mini_is_gsharedvt_klass (klass)) {
3394 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3395 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* Lazily resolve the corlib bzero helper used for the indirect call's
 * signature. */
3397 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3398 g_assert (bzero_method);
3400 iargs [1] = size_ins;
3401 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3405 n = mono_class_value_size (klass, &align);
/* Small structs: inline store sequence instead of a call. */
3407 if (n <= sizeof (gpointer) * 8) {
3408 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3411 memset_method = get_memset_method ();
3413 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3414 EMIT_NEW_ICONST (cfg, iargs [2], n);
3415 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3422 * Emit IR to return either the this pointer for instance method,
3423 * or the mrgctx for static methods.
/* (For gshared code only.) The rgctx source is, in order: the 'this'
 * argument, the mrgctx variable, or the vtable variable — with an extra
 * load to reach the vtable when starting from an mrgctx or 'this'. */
3426 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3428 MonoInst *this_ins = NULL;
3430 g_assert (cfg->gshared);
/* Instance methods of reference types: 'this' carries the context. */
3432 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3433 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3434 !method->klass->valuetype)
3435 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* Method context used: the mrgctx was stashed in the vtable var slot. */
3437 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3438 MonoInst *mrgctx_loc, *mrgctx_var;
3440 g_assert (!this_ins);
3441 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3443 mrgctx_loc = mono_get_vtable_var (cfg);
3444 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static or valuetype methods: context comes from the vtable var. */
3447 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3448 MonoInst *vtable_loc, *vtable_var;
3450 g_assert (!this_ins);
3452 vtable_loc = mono_get_vtable_var (cfg);
3453 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3455 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an mrgctx; load its class_vtable field. */
3456 MonoInst *mrgctx_var = vtable_var;
3459 vtable_reg = alloc_preg (cfg);
3460 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3461 vtable_var->type = STACK_PTR;
/* Fall-through case: load the vtable out of 'this'. */
3469 vtable_reg = alloc_preg (cfg);
3470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) an rgctx-entry descriptor: METHOD identifies
 * the context, IN_MRGCTX selects mrgctx vs vtable lookup, and the embedded
 * MonoJumpInfo carries PATCH_TYPE/PATCH_DATA; INFO_TYPE says what kind of
 * value the slot should resolve to.
 */
3475 static MonoJumpInfoRgctxEntry *
3476 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3478 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3479 res->method = method;
3480 res->in_mrgctx = in_mrgctx;
3481 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3482 res->data->type = patch_type;
3483 res->data->data.target = patch_data;
3484 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fast path for fetching rgctx ENTRY from RGCTX: walk the
 * (m)rgctx table chain for the slot's depth, load the slot, and fall back
 * to the mono_fill_{method,class}_rgctx icall when any pointer along the
 * way (or the slot itself) is still NULL. Returns the fetched value.
 * The first branch (AOT, judging by the SLOT_INDEX aotconst) always calls
 * the icall since the slot number is not a compile-time constant there.
 */
3489 static inline MonoInst*
3490 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3492 MonoInst *args [16];
3495 // FIXME: No fastpath since the slot is not a compile time constant
3497 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3498 if (entry->in_mrgctx)
3499 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3501 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3505 * FIXME: This can be called during decompose, which is a problem since it creates
3507 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3509 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3511 MonoBasicBlock *is_null_bb, *end_bb;
3512 MonoInst *res, *ins, *call;
3515 slot = mini_get_rgctx_entry_slot (entry);
3517 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3518 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots start after the fixed MonoMethodRuntimeGenericContext header. */
3520 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many table levels deep the slot index lives. */
3521 for (depth = 0; ; ++depth) {
3522 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3524 if (index < size - 1)
3529 NEW_BBLOCK (cfg, end_bb);
3530 NEW_BBLOCK (cfg, is_null_bb);
3533 rgctx_reg = rgctx->dreg;
3535 rgctx_reg = alloc_preg (cfg);
/* Non-mrgctx case: reach the rgctx through the vtable. */
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3538 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3539 NEW_BBLOCK (cfg, is_null_bb);
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3545 for (i = 0; i < depth; ++i) {
3546 int array_reg = alloc_preg (cfg);
3548 /* load ptr to next array */
3549 if (mrgctx && i == 0)
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3553 rgctx_reg = array_reg;
3554 /* is the ptr null? */
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3556 /* if yes, jump to actual trampoline */
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot itself (+1 skips the next-table pointer at index 0). */
3561 val_reg = alloc_preg (cfg);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3563 /* is the slot null? */
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3565 /* if yes, jump to actual trampoline */
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path result. */
3569 res_reg = alloc_preg (cfg);
3570 MONO_INST_NEW (cfg, ins, OP_MOVE);
3571 ins->dreg = res_reg;
3572 ins->sreg1 = val_reg;
3573 MONO_ADD_INS (cfg->cbb, ins);
3575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot via an icall, then merge into res_reg. */
3578 MONO_START_BB (cfg, is_null_bb);
3580 EMIT_NEW_ICONST (cfg, args [1], index);
3582 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3584 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3585 MONO_INST_NEW (cfg, ins, OP_MOVE);
3586 ins->dreg = res_reg;
3587 ins->sreg1 = call->dreg;
3588 MONO_ADD_INS (cfg->cbb, ins);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3591 MONO_START_BB (cfg, end_bb);
3600 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/* Dispatches to the inline fast-path version or, otherwise, to the lazy
 * fetch trampoline via an ABS patch call. The selecting condition is in an
 * elided line — presumably cfg->llvm_only; confirm. */
3603 static inline MonoInst*
3604 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3607 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3609 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching, from the current method's rgctx, the value of type
 * RGCTX_TYPE associated with KLASS (e.g. its vtable or size).
 */
3613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3614 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3616 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3617 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3619 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR fetching, from the current method's rgctx, the value of type
 * RGCTX_TYPE associated with signature SIG.
 */
3623 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3624 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3626 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3627 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3629 return emit_rgctx_fetch (cfg, rgctx, entry);
3633 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3634 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3636 MonoJumpInfoGSharedVtCall *call_info;
3637 MonoJumpInfoRgctxEntry *entry;
3640 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3641 call_info->sig = sig;
3642 call_info->method = cmethod;
3644 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3645 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3647 return emit_rgctx_fetch (cfg, rgctx, entry);
3651 * emit_get_rgctx_virt_method:
3653 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3656 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3657 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3659 MonoJumpInfoVirtMethod *info;
3660 MonoJumpInfoRgctxEntry *entry;
3663 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3664 info->klass = klass;
3665 info->method = virt_method;
3667 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3668 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3670 return emit_rgctx_fetch (cfg, rgctx, entry);
3674 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3675 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3677 MonoJumpInfoRgctxEntry *entry;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687  * emit_get_rgctx_method:
3689  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3690  * normal constants, else emit a load from the rgctx.
3693 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3694 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at JIT time, emit it as a constant. */
3696 if (!context_used) {
3699 switch (rgctx_type) {
3700 case MONO_RGCTX_INFO_METHOD:
3701 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3703 case MONO_RGCTX_INFO_METHOD_RGCTX:
3704 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info kinds cannot be emitted as constants here. */
3707 g_assert_not_reached ();
/* Shared case: describe the slot (a METHODCONST patch plus the requested
 * info kind) and fetch it from the runtime generic context at run time. */
3710 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3711 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
3718 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3719 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3721 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3722 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3724 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry for (DATA, RGCTX_TYPE) in the current
 * method's gsharedvt info template table, adding a new entry (growing the
 * table if needed) when no matching entry exists yet.
 */
3728 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3730 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
/* 'template' is an ordinary identifier here; this file is C, not C++. */
3731 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot when the same (type, data) pair was already added.
 * LOCAL_OFFSET entries are never shared: each use gets its own slot. */
3736 for (i = 0; i < info->num_entries; ++i) {
3737 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3739 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Table full: grow geometrically. Memory comes from the cfg mempool, so the
 * old array is simply abandoned, not freed. */
3743 if (info->num_entries == info->count_entries) {
3744 MonoRuntimeGenericContextInfoTemplate *new_entries;
3745 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3747 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3749 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3750 info->entries = new_entries;
3751 info->count_entries = new_count_entries;
/* Append the new template entry and return its index. */
3754 idx = info->num_entries;
3755 template = &info->entries [idx];
3756 template->info_type = rgctx_type;
3757 template->data = data;
3759 info->num_entries ++;
3765  * emit_get_gsharedvt_info:
3767  * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3770 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a template slot for (data, rgctx_type). */
3775 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3776 /* Load info->entries [idx] */
3777 dreg = alloc_preg (cfg);
/* gsharedvt_info_var points at a MonoGSharedVtMethodRuntimeInfo; read the
 * idx-th pointer-sized word of its trailing 'entries' array. */
3778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3784 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3786 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3790  * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which ensures KLASS is initialized (vtable set up, .cctor run)
 * before execution continues. The vtable argument comes from the rgctx in
 * shared code and is a constant otherwise.
 */
3793 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3795 MonoInst *vtable_arg;
3798 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must be looked up at run time through the rgctx. */
3801 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3802 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable is known now; load failures are reported via
 * @klass, which the caller must check (see comment above). */
3804 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3808 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3811 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3815 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3816 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3818 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3819 ins->sreg1 = vtable_arg->dreg;
3820 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: open-code the "already initialized?" bitfield test and only call
 * the mono_generic_class_init icall when the bit is clear. */
3822 static int byte_offset = -1;
3823 static guint8 bitmask;
3824 int bits_reg, inited_reg;
3825 MonoBasicBlock *inited_bb;
3826 MonoInst *args [16];
/* Resolve the byte/bit of MonoVTable.initialized once per process.
 * NOTE(review): this static init is unsynchronized — presumably safe because
 * every thread computes identical values; confirm. */
3828 if (byte_offset < 0)
3829 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3831 bits_reg = alloc_ireg (cfg);
3832 inited_reg = alloc_ireg (cfg);
/* inited_reg = vtable->initialized bit, isolated with a mask. */
3834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3837 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the class is already initialized. */
3839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3842 args [0] = vtable_arg;
3843 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3845 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at native IL offset (ip - header->code), but only
 * when sequence point generation is enabled and METHOD is the method being
 * compiled (i.e. not an inlined callee).
 */
3850 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3854 if (cfg->gen_seq_points && cfg->method == method) {
3855 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* NOTE(review): the guard for this flag (presumably 'nonempty_stack') is not
 * visible in this chunk — confirm against the full source. */
3857 ins->flags |= MONO_INST_NONEMPTY_STACK;
3858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts (better_cast_details) is enabled, emit IR which
 * records the source class of OBJ_REG and the target class KLASS in the JIT
 * TLS area, so a failing cast can produce a detailed error message. No-op
 * otherwise. NULL objects are skipped: they can never fail a cast.
 */
3863 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3865 if (mini_get_debug_options ()->better_cast_details) {
3866 int vtable_reg = alloc_preg (cfg);
3867 int klass_reg = alloc_preg (cfg);
3868 MonoBasicBlock *is_null_bb = NULL;
3870 int to_klass_reg, context_used;
/* Branch past the detail recording for null objects. */
3873 NEW_BBLOCK (cfg, is_null_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3879 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work.
 * (Fixed: the message previously ended with a stray '.' after the newline.) */
3881 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3885 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3891 context_used = mini_class_check_context_used (cfg, klass);
/* class_cast_to: fetched through the rgctx in shared code, constant otherwise. */
3893 MonoInst *class_ins;
3895 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3896 to_klass_reg = class_ins->dreg;
3898 to_klass_reg = alloc_preg (cfg);
3899 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3904 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-detail information recorded by save_cast_details () so a
 * later, unrelated cast failure does not report stale classes. No-op unless
 * --debug=casts is enabled.
 */
3909 reset_cast_details (MonoCompile *cfg)
3911 /* Reset the variables holding the cast details */
3912 if (mini_get_debug_options ()->better_cast_details) {
3913 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3915 MONO_ADD_INS (cfg->cbb, tls_get);
3916 /* It is enough to reset the from field */
3917 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3922  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ's runtime
 * type matches ARRAY_CLASS exactly; used for covariant array stores.
 */
3925 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3927 int vtable_reg = alloc_preg (cfg);
3930 context_used = mini_class_check_context_used (cfg, array_class);
3932 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: doubles as the null check on obj. */
3934 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* With MONO_OPT_SHARED, vtables are per-domain, so compare classes instead
 * of vtables. */
3936 if (cfg->opt & MONO_OPT_SHARED) {
3937 int class_reg = alloc_preg (cfg);
3938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3939 if (cfg->compile_aot) {
3940 int klass_reg = alloc_preg (cfg);
3941 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared code: the expected vtable comes from the rgctx. */
3946 } else if (context_used) {
3947 MonoInst *vtable_ins;
3949 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3952 if (cfg->compile_aot) {
/* NOTE(review): declarations of 'vtable'/'vt_reg' and the error handling for
 * a NULL vtable are not visible in this chunk. */
3956 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3958 vt_reg = alloc_preg (cfg);
3959 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3963 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch above raises the exception. */
3969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3971 reset_cast_details (cfg);
3975  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3976  * generic code is generated.
/* Emits a call to Nullable<T>.Unbox (): indirect through an rgctx-fetched
 * address in shared code, direct otherwise. */
3979 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3981 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared code: fetch the method's code address from the rgctx and call it
 * indirectly, passing the rgctx too. */
3984 MonoInst *rgctx, *addr;
3986 /* FIXME: What if the class is shared? We might not
3987 have to get the address of the method from the
3989 addr = emit_get_rgctx_method (cfg, context_used, method,
3990 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3992 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3994 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: call Unbox directly, passing the vtable if the method
 * requires it. */
3996 gboolean pass_vtable, pass_mrgctx;
3997 MonoInst *rgctx_arg = NULL;
3999 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4000 g_assert (!pass_mrgctx);
4003 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4006 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4009 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR implementing 'unbox' of sp [0] to value type KLASS: verify the
 * object's element class matches KLASS (raising InvalidCastException
 * otherwise) and produce the address of the boxed payload.
 */
4014 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4018 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4019 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4020 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4021 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4023 obj_reg = sp [0]->dreg;
/* Faulting load: doubles as the null check. */
4024 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4025 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4027 /* FIXME: generics */
4028 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type. */
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
4038 MonoInst *element_class;
4040 /* This assertion is from the unboxcast insn */
4041 g_assert (klass->rank == 0);
4043 element_class = emit_get_rgctx_klass (cfg, context_used,
4044 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4046 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4047 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: the element class is known at JIT time. */
4049 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4050 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4051 reset_cast_details (cfg);
/* The unboxed value lives immediately after the MonoObject header. */
4054 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4055 MONO_ADD_INS (cfg->cbb, add);
4056 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR unboxing OBJ to KLASS when KLASS is a gsharedvt type whose
 * concrete representation (vtype, reference or Nullable) is only known at
 * run time: branch on the class's box type and compute the address of the
 * value in each case, then load the result from that address.
 */
4063 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4065 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4066 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4070 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* First cast obj to the runtime class; the icall throws on failure. */
4076 args [1] = klass_inst;
4079 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4081 NEW_BBLOCK (cfg, is_ref_bb);
4082 NEW_BBLOCK (cfg, is_nullable_bb);
4083 NEW_BBLOCK (cfg, end_bb);
/* Box type: 1 = reference, 2 = nullable, otherwise plain vtype (fall through). */
4084 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4091 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4092 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the value sits right after the MonoObject header. */
4096 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4097 MONO_ADD_INS (cfg->cbb, addr);
4099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4102 MONO_START_BB (cfg, is_ref_bb);
4104 /* Save the ref to a temporary */
4105 dreg = alloc_ireg (cfg);
4106 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4107 addr->dreg = addr_reg;
4108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4112 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time. */
4115 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4116 MonoInst *unbox_call;
4117 MonoMethodSignature *unbox_sig;
4119 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4120 unbox_sig->ret = &klass->byval_arg;
4121 unbox_sig->param_count = 1;
4122 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4123 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4125 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4126 addr->dreg = addr_reg;
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4132 MONO_START_BB (cfg, end_bb);
/* Load the final value from whichever address the taken path produced. */
4135 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4141  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS (FOR_BOX when the allocation is
 * part of a box operation). Chooses between managed GC allocators,
 * specialized helpers and generic icalls depending on sharing mode, AOT
 * compilation and GC support.
 */
4144 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4146 MonoInst *iargs [2];
/* Shared-code path: the klass/vtable argument must come from the rgctx. */
4152 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-dependent instance size. */
4153 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4155 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4157 if (cfg->opt & MONO_OPT_SHARED)
4158 rgctx_info = MONO_RGCTX_INFO_KLASS;
4160 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4161 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
/* With MONO_OPT_SHARED the allocator is mono_object_new (domain, klass);
 * otherwise mono_object_new_specific (vtable). */
4163 if (cfg->opt & MONO_OPT_SHARED) {
4164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4166 alloc_ftn = mono_object_new;
4169 alloc_ftn = mono_object_new_specific;
/* Prefer the managed (inlinable) allocator when the GC provides one. */
4172 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4173 if (known_instance_size) {
4174 int size = mono_class_instance_size (klass);
/* A size below the object header is impossible; treat as fatal corruption. */
4175 if (size < sizeof (MonoObject))
4176 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4178 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4180 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4183 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared code paths below. */
4186 if (cfg->opt & MONO_OPT_SHARED) {
4187 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4190 alloc_ftn = mono_object_new;
4191 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4192 /* This happens often in argument checking code, eg. throw new FooException... */
4193 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4194 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4195 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4197 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4198 MonoMethod *managed_alloc = NULL;
/* Class failed to load: report a TypeLoadException through the cfg. */
4202 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4203 cfg->exception_ptr = klass;
4207 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4209 if (managed_alloc) {
4210 int size = mono_class_instance_size (klass);
4211 if (size < sizeof (MonoObject))
4212 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4214 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4215 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4216 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4218 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation helpers take the instance size in pointer-sized words
 * ('lw') as their first argument. */
4220 guint32 lw = vtable->klass->instance_size;
4221 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4222 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4223 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4226 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4230 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4234  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS into a new object. Nullable types go
 * through Nullable<T>.Box (); gsharedvt types branch at run time on the
 * class's box type; everything else allocates an object and stores the
 * value after the header.
 */
4237 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4239 MonoInst *alloc, *ins;
4241 if (mono_class_is_nullable (klass)) {
4242 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
/* Shared nullable: call Box indirectly through an rgctx-fetched address. */
4245 /* FIXME: What if the class is shared? We might not
4246 have to get the method address from the RGCTX. */
4247 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4248 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4249 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4251 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: call Box directly, passing the vtable if required. */
4253 gboolean pass_vtable, pass_mrgctx;
4254 MonoInst *rgctx_arg = NULL;
4256 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4257 g_assert (!pass_mrgctx);
4260 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4263 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4266 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the boxed representation is only known at run time. */
4270 if (mini_is_gsharedvt_klass (klass)) {
4271 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4272 MonoInst *res, *is_ref, *src_var, *addr;
4275 dreg = alloc_ireg (cfg);
4277 NEW_BBLOCK (cfg, is_ref_bb);
4278 NEW_BBLOCK (cfg, is_nullable_bb);
4279 NEW_BBLOCK (cfg, end_bb);
/* Box type: 1 = reference, 2 = nullable, otherwise plain vtype (fall through). */
4280 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate an object and copy the value after the header. */
4288 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4291 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4292 ins->opcode = OP_STOREV_MEMBASE;
4294 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4295 res->type = STACK_OBJ;
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4300 MONO_START_BB (cfg, is_ref_bb);
4302 /* val is a vtype, so has to load the value manually */
4303 src_var = get_vreg_to_inst (cfg, val->dreg);
4305 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4306 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4311 MONO_START_BB (cfg, is_nullable_bb);
4314 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4315 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4317 MonoMethodSignature *box_sig;
4320 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4321 * construct that method at JIT time, so have to do things by hand.
4323 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4324 box_sig->ret = &mono_defaults.object_class->byval_arg;
4325 box_sig->param_count = 1;
4326 box_sig->params [0] = &klass->byval_arg;
4327 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4328 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4329 res->type = STACK_OBJ;
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4335 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt) vtype: allocate and store the value after the header. */
4339 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4343 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with a covariant/contravariant type parameter instantiated with a
 * reference type. Such classes need the variance-aware cast helpers.
 */
4349 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4352 MonoGenericContainer *container;
4353 MonoGenericInst *ginst;
/* Closed generic instance: inspect the concrete type arguments. */
4355 if (klass->generic_class) {
4356 container = klass->generic_class->container_class->generic_container;
4357 ginst = klass->generic_class->context.class_inst;
/* Open generic in shared code: inspect the container's own context. */
4358 } else if (klass->generic_container && context_used) {
4359 container = klass->generic_container;
4360 ginst = container->context.class_inst;
/* Check each variant parameter's instantiation for a reference type. */
4365 for (i = 0; i < container->type_argc; ++i) {
4367 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4369 type = ginst->type_argv [i];
4370 if (mini_type_is_reference (type))
/* Set of corlib type names whose icalls are assumed never to raise
 * exceptions. Built once, published with a memory barrier, then read
 * lock-free. */
4376 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be invoked directly, without a
 * wrapper, i.e. it does not directly or indirectly call
 * mono_raise_exception ().
 */
4379 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4381 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4382 if (!direct_icalls_enabled (cfg))
4386 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4387 * Whitelist a few icalls for now.
4389 if (!direct_icall_type_hash) {
4390 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4392 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4393 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4394 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4395 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully built table before making the global pointer visible. */
4396 mono_memory_barrier ();
4397 direct_icall_type_hash = h;
/* All System.Math icalls are considered safe. */
4400 if (cmethod->klass == mono_defaults.math_class)
4402 /* No locking needed */
4403 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* A cast to KLASS is 'complex' (cannot be decided by a simple vtable/class
 * compare) for interfaces, arrays, nullables, MarshalByRef classes, sealed
 * classes and generic type variables. */
4408 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper; ARGS is the
 * (obj, klass, cache slot) triple. Cast details are recorded around the
 * call for --debug=casts.
 */
4411 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4413 MonoMethod *mono_castclass;
4416 mono_castclass = mono_marshal_get_castclass_with_cache ();
4418 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4419 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4420 reset_cast_details (cfg);
4426 get_castclass_cache_idx (MonoCompile *cfg)
4428 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4429 cfg->castclass_cache_index ++;
4430 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared helper which builds the (obj, klass, cache) argument triple
 * for emit_castclass_with_cache. The cache slot is an AOT patch under AOT
 * compilation and a freshly allocated domain word otherwise.
 */
4434 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4443 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4446 if (cfg->compile_aot) {
/* AOT: the cache slot is materialized at load time via a CASTCLASS_CACHE patch. */
4447 idx = get_castclass_cache_idx (cfg);
4448 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
/* JIT: allocate one pointer-sized cache word from the domain. */
4450 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4453 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4454 return emit_castclass_with_cache (cfg, klass, args);
4458 * Returns NULL and set the cfg exception on error.
4461 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4463 MonoBasicBlock *is_null_bb;
4464 int obj_reg = src->dreg;
4465 int vtable_reg = alloc_preg (cfg);
4467 MonoInst *klass_inst = NULL, *res;
4469 context_used = mini_class_check_context_used (cfg, klass);
4471 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4472 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4473 (*inline_costs) += 2;
4475 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4476 MonoMethod *mono_castclass;
4477 MonoInst *iargs [1];
4480 mono_castclass = mono_marshal_get_castclass (klass);
4483 save_cast_details (cfg, klass, src->dreg, TRUE);
4484 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4485 iargs, ip, cfg->real_offset, TRUE);
4486 reset_cast_details (cfg);
4487 CHECK_CFG_EXCEPTION;
4488 g_assert (costs > 0);
4490 cfg->real_offset += 5;
4492 (*inline_costs) += costs;
4500 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4501 MonoInst *cache_ins;
4503 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4508 /* klass - it's the second element of the cache entry*/
4509 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4512 args [2] = cache_ins;
4514 return emit_castclass_with_cache (cfg, klass, args);
4517 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4520 NEW_BBLOCK (cfg, is_null_bb);
4522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4525 save_cast_details (cfg, klass, obj_reg, FALSE);
4527 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4529 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4531 int klass_reg = alloc_preg (cfg);
4533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4535 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4536 /* the remoting code is broken, access the class for now */
4537 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4538 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4541 cfg->exception_ptr = klass;
4544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4549 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4556 MONO_START_BB (cfg, is_null_bb);
4558 reset_cast_details (cfg);
4567 * Returns NULL and set the cfg exception on error.
4570 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4573 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4574 int obj_reg = src->dreg;
4575 int vtable_reg = alloc_preg (cfg);
4576 int res_reg = alloc_ireg_ref (cfg);
4577 MonoInst *klass_inst = NULL;
4582 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4583 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4584 MonoInst *cache_ins;
4586 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4591 /* klass - it's the second element of the cache entry*/
4592 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4595 args [2] = cache_ins;
4597 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4600 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4603 NEW_BBLOCK (cfg, is_null_bb);
4604 NEW_BBLOCK (cfg, false_bb);
4605 NEW_BBLOCK (cfg, end_bb);
4607 /* Do the assignment at the beginning, so the other assignment can be if converted */
4608 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4609 ins->type = STACK_OBJ;
4612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4613 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4617 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4618 g_assert (!context_used);
4619 /* the is_null_bb target simply copies the input register to the output */
4620 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4622 int klass_reg = alloc_preg (cfg);
4625 int rank_reg = alloc_preg (cfg);
4626 int eclass_reg = alloc_preg (cfg);
4628 g_assert (!context_used);
4629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4634 if (klass->cast_class == mono_defaults.object_class) {
4635 int parent_reg = alloc_preg (cfg);
4636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4637 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4638 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4640 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4641 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4642 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4644 } else if (klass->cast_class == mono_defaults.enum_class) {
4645 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4647 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4648 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4650 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4651 /* Check that the object is a vector too */
4652 int bounds_reg = alloc_preg (cfg);
4653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4658 /* the is_null_bb target simply copies the input register to the output */
4659 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4661 } else if (mono_class_is_nullable (klass)) {
4662 g_assert (!context_used);
4663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4667 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4668 g_assert (!context_used);
4669 /* the remoting code is broken, access the class for now */
4670 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4671 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4673 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4674 cfg->exception_ptr = klass;
4677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4686 /* the is_null_bb target simply copies the input register to the output */
4687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4692 MONO_START_BB (cfg, false_bb);
4694 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4697 MONO_START_BB (cfg, is_null_bb);
4699 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR implementing the CISINST check of SRC against KLASS.  The result
 * is an integer register whose encoding is documented in the comment below.
 * When remoting is compiled in (!DISABLE_REMOTING), extra basic blocks handle
 * transparent proxies whose type cannot be determined at JIT time.
 * NOTE(review): this listing is missing some original lines (braces,
 * #else/#endif); only comments were added here.
 */
4705 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4707 /* This opcode takes as input an object reference and a class, and returns:
4708 0) if the object is an instance of the class,
4709 1) if the object is not instance of the class,
4710 2) if the object is a proxy whose type cannot be determined */
4713 #ifndef DISABLE_REMOTING
4714 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4716 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4718 int obj_reg = src->dreg;
4719 int dreg = alloc_ireg (cfg);
4721 #ifndef DISABLE_REMOTING
4722 int klass_reg = alloc_preg (cfg);
4725 NEW_BBLOCK (cfg, true_bb);
4726 NEW_BBLOCK (cfg, false_bb);
4727 NEW_BBLOCK (cfg, end_bb);
4728 #ifndef DISABLE_REMOTING
4729 NEW_BBLOCK (cfg, false2_bb);
4730 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is not an instance of anything: branch straight to the "false" result */
4733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4736 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4737 #ifndef DISABLE_REMOTING
4738 NEW_BBLOCK (cfg, interface_fail_bb);
4741 tmp_reg = alloc_preg (cfg);
/* Load the object's vtable for the interface-table check */
4742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4743 #ifndef DISABLE_REMOTING
4744 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4745 MONO_START_BB (cfg, interface_fail_bb);
4746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: unless the object is a transparent proxy, the answer is "false" */
4748 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4750 tmp_reg = alloc_preg (cfg);
4751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy with custom type info: result 2 ("cannot be determined") */
4752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4755 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4758 #ifndef DISABLE_REMOTING
/* Non-interface case: compare against the object's class, handling proxies first */
4759 tmp_reg = alloc_preg (cfg);
4760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4763 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class instead */
4764 tmp_reg = alloc_preg (cfg);
4765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4768 tmp_reg = alloc_preg (cfg);
4769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4773 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4776 MONO_START_BB (cfg, no_proxy_bb);
4778 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4780 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: result 1 -- not an instance of the class */
4784 MONO_START_BB (cfg, false_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4789 #ifndef DISABLE_REMOTING
/* false2_bb: result 2 -- proxy whose type cannot be determined */
4790 MONO_START_BB (cfg, false2_bb);
4792 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4793 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: result 0 -- object is an instance of the class */
4796 MONO_START_BB (cfg, true_bb);
4798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4800 MONO_START_BB (cfg, end_bb);
4803 MONO_INST_NEW (cfg, ins, OP_ICONST);
4805 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the CCASTCLASS check of SRC against KLASS (the
 * remoting-aware variant of castclass).  The result encoding is documented
 * below; an InvalidCastException is raised at runtime when the cast fails.
 * NOTE(review): this listing is missing some original lines (braces,
 * #else/#endif); only comments were added here.
 */
4811 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4813 /* This opcode takes as input an object reference and a class, and returns:
4814 0) if the object is an instance of the class,
4815 1) if the object is a proxy whose type cannot be determined
4816 an InvalidCastException exception is thrown otherwise */
4819 #ifndef DISABLE_REMOTING
4820 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4822 MonoBasicBlock *ok_result_bb;
4824 int obj_reg = src->dreg;
4825 int dreg = alloc_ireg (cfg);
4826 int tmp_reg = alloc_preg (cfg);
4828 #ifndef DISABLE_REMOTING
4829 int klass_reg = alloc_preg (cfg);
4830 NEW_BBLOCK (cfg, end_bb);
4833 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds */
4835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failed cast can produce a descriptive exception */
4838 save_cast_details (cfg, klass, obj_reg, FALSE);
4840 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4841 #ifndef DISABLE_REMOTING
4842 NEW_BBLOCK (cfg, interface_fail_bb);
4844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4845 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4846 MONO_START_BB (cfg, interface_fail_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* If it is not a transparent proxy, the interface cast has definitively failed */
4849 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4851 tmp_reg = alloc_preg (cfg);
4852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* A proxy without custom type info cannot satisfy the cast: throw */
4853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4854 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("cannot be determined") */
4856 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4860 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4864 #ifndef DISABLE_REMOTING
/* Non-interface case: compare classes, with a separate path for proxies */
4865 NEW_BBLOCK (cfg, no_proxy_bb);
4867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4869 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class */
4871 tmp_reg = alloc_preg (cfg);
4872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4873 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4875 tmp_reg = alloc_preg (cfg);
4876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4880 NEW_BBLOCK (cfg, fail_1_bb);
4882 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: result 1 -- proxy whose type cannot be determined */
4884 MONO_START_BB (cfg, fail_1_bb);
4886 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4889 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass here throws InvalidCastException */
4891 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4893 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: result 0 -- cast succeeded */
4897 MONO_START_BB (cfg, ok_result_bb);
4899 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4901 #ifndef DISABLE_REMOTING
4902 MONO_START_BB (cfg, end_bb);
4906 MONO_INST_NEW (cfg, ins, OP_ICONST);
4908 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline implementation of Enum.HasFlag () for KLASS:
 * computes (*enum_this & enum_flag) == enum_flag, using 32 bit or
 * 64 bit registers depending on the enum's underlying type.
 */
4913 static G_GNUC_UNUSED MonoInst*
4914 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4916 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4917 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4920 switch (enum_type->type) {
4923 #if SIZEOF_REGISTER == 8
4935 MonoInst *load, *and, *cmp, *ceq;
/* Allocate registers sized to the enum's underlying type (32 vs 64 bit) */
4936 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4937 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4938 int dest_reg = alloc_ireg (cfg);
/* HasFlag (f) <=> (value & f) == f */
4940 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4941 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4942 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4943 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4945 ceq->type = STACK_I4;
/* Decompose early in case the backend does not support these opcodes directly */
4948 load = mono_decompose_opcode (cfg, load);
4949 and = mono_decompose_opcode (cfg, and);
4950 cmp = mono_decompose_opcode (cfg, cmp);
4951 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR equivalent to mono_delegate_ctor (): allocate the delegate
 * object of type KLASS, store its target/method fields, and install the
 * invoke trampoline.  VIRTUAL selects the virtual-delegate path; with
 * cfg->llvm_only a JIT icall is used instead of inlining the ctor.
 */
4959 * Returns NULL and set the cfg exception on error.
4961 static G_GNUC_UNUSED MonoInst*
4962 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4966 gpointer trampoline;
4967 MonoInst *obj, *method_ins, *tramp_ins;
4971 if (virtual && !cfg->llvm_only) {
4972 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out if the backend has no virtual invoke thunk for this signature */
4975 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate object itself */
4979 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4983 if (cfg->llvm_only) {
4984 MonoInst *args [16];
4987 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4988 * the address of a gshared method. So use a JIT icall.
4989 * FIXME: Optimize this.
4993 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4994 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4999 /* Inline the contents of mono_delegate_ctor */
5001 /* Set target field */
5002 /* Optimize away setting of NULL target */
5003 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a GC reference, so a write barrier may be needed */
5005 if (cfg->gen_write_barriers) {
5006 dreg = alloc_preg (cfg);
5007 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5008 emit_write_barrier (cfg, ptr, target);
5012 /* Set method field */
5013 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5014 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5017 * To avoid looking up the compiled code belonging to the target method
5018 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5019 * store it, and we fill it after the method has been compiled.
5021 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5022 MonoInst *code_slot_ins;
5025 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot hash under the domain lock */
5027 domain = mono_domain_get ();
5028 mono_domain_lock (domain);
5029 if (!domain_jit_info (domain)->method_code_hash)
5030 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5031 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5033 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5034 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5036 mono_domain_unlock (domain);
5038 if (cfg->compile_aot)
5039 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5041 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: an AOT constant when AOT-compiling, a runtime pointer otherwise */
5046 if (cfg->compile_aot) {
5047 MonoDelegateClassMethodPair *del_tramp;
5049 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5050 del_tramp->klass = klass;
5051 del_tramp->method = context_used ? NULL : method;
5052 del_tramp->is_virtual = virtual;
5053 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5056 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5058 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5059 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5062 /* Set invoke_impl field */
5064 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Copy invoke_impl and method_ptr out of the MonoDelegateTrampInfo into the delegate */
5066 dreg = alloc_preg (cfg);
5067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5070 dreg = alloc_preg (cfg);
5071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5072 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (single-byte flag) */
5075 dreg = alloc_preg (cfg);
5076 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5079 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall for a RANK-dimensional
 * array allocation, with the dimension arguments in SP.  The icall uses a
 * vararg calling convention, which also forces LLVM compilation off for
 * this method.
 */
5085 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5087 MonoJitICallInfo *info;
5089 /* Need to register the icall so it gets an icall wrapper */
5090 info = mono_get_array_new_va_icall (rank);
5092 cfg->flags |= MONO_CFG_HAS_VARARGS;
5094 /* mono_array_new_va () needs a vararg calling convention */
5095 cfg->disable_llvm = TRUE;
5097 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5102 * handle_constrained_gsharedvt_call:
5104 * Handle constrained calls where the receiver is a gsharedvt type.
5105 * Return the instruction representing the call. Set the cfg exception on failure.
5108 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5109 gboolean *ref_emit_widen)
5111 MonoInst *ins = NULL;
5112 gboolean emit_widen = *ref_emit_widen;
5115 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5116 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5117 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only simple shapes are supported: see the list of handled calls below */
5119 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5120 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5121 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5122 MonoInst *args [16];
5125 * This case handles calls to
5126 * - object:ToString()/Equals()/GetHashCode(),
5127 * - System.IComparable<T>:CompareTo()
5128 * - System.IEquatable<T>:Equals ()
5129 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, possibly looked up through the rgctx for gshared code */
5133 if (mono_method_check_context_used (cmethod))
5134 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5136 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5137 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5139 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5140 if (fsig->hasthis && fsig->param_count) {
5141 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5142 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5143 ins->dreg = alloc_preg (cfg);
5144 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5145 MONO_ADD_INS (cfg->cbb, ins);
5148 if (mini_is_gsharedvt_type (fsig->params [0])) {
/* args [3] tells the icall whether the argument needs boxing; args [4] holds its address */
5151 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5153 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5154 addr_reg = ins->dreg;
5155 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5157 EMIT_NEW_ICONST (cfg, args [3], 0);
5158 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5161 EMIT_NEW_ICONST (cfg, args [3], 0);
5162 EMIT_NEW_ICONST (cfg, args [4], 0);
5164 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it as required by the return type */
5167 if (mini_is_gsharedvt_type (fsig->ret)) {
5168 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5169 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed value */
5173 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5174 MONO_ADD_INS (cfg->cbb, add);
5176 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5177 MONO_ADD_INS (cfg->cbb, ins);
5178 /* ins represents the call result */
5181 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5184 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 * initialize cfg->got_var, and add a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op if the got var does
 * not exist or was already allocated.
 */
5193 mono_emit_load_got_addr (MonoCompile *cfg)
5195 MonoInst *getaddr, *dummy_use;
5197 if (!cfg->got_var || cfg->got_var_allocated)
5200 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5201 getaddr->cil_code = cfg->header->code;
5202 getaddr->dreg = cfg->got_var->dreg;
5204 /* Add it to the start of the first bblock */
5205 if (cfg->bb_entry->code) {
5206 getaddr->next = cfg->bb_entry->code;
5207 cfg->bb_entry->code = getaddr;
5210 MONO_ADD_INS (cfg->bb_entry, getaddr);
5212 cfg->got_var_allocated = TRUE;
5215 * Add a dummy use to keep the got_var alive, since real uses might
5216 * only be generated by the back ends.
5217 * Add it to end_bblock, so the variable's lifetime covers the whole
5219 * It would be better to make the usage of the got var explicit in all
5220 * cases when the backend needs it (i.e. calls, throw etc.), so this
5221 * wouldn't be needed.
5223 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5224 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via the MONO_INLINELIMIT env var */
5227 static int inline_limit;
5228 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects noinline/synchronized/marshalbyref methods, methods over the
 * size limit (unless marked AggressiveInlining), methods whose class
 * cctor would have to run inside the inlined code, soft-float R4 cases,
 * and methods on cfg->dont_inline.
 */
5231 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5233 MonoMethodHeaderSummary header;
5235 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5236 MonoMethodSignature *sig = mono_method_signature (method);
5240 if (cfg->disable_inline)
/* Cap the inlining recursion depth */
5245 if (cfg->inline_depth > 10)
5248 if (!mono_method_get_header_summary (method, &header))
5251 /*runtime, icall and pinvoke are checked by summary call*/
5252 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5253 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5254 (mono_class_is_marshalbyref (method->klass)) ||
5258 /* also consider num_locals? */
5259 /* Do the size check early to avoid creating vtables */
5260 if (!inline_limit_inited) {
5261 if (g_getenv ("MONO_INLINELIMIT"))
5262 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5264 inline_limit = INLINE_LENGTH_LIMIT;
5265 inline_limit_inited = TRUE;
5267 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5271 * if we can initialize the class of the method right away, we do,
5272 * otherwise we don't allow inlining if the class needs initialization,
5273 * since it would mean inserting a call to mono_runtime_class_init()
5274 * inside the inlined code
5276 if (!(cfg->opt & MONO_OPT_SHARED)) {
5277 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5278 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5279 vtable = mono_class_vtable (cfg->domain, method->klass);
5282 if (!cfg->compile_aot)
5283 mono_runtime_class_init (vtable);
5284 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5285 if (cfg->run_cctors && method->klass->has_cctor) {
5286 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5287 if (!method->klass->runtime_info)
5288 /* No vtable created yet */
5290 vtable = mono_class_vtable (cfg->domain, method->klass);
5293 /* This makes so that inline cannot trigger */
5294 /* .cctors: too many apps depend on them */
5295 /* running with a specific order... */
5296 if (! vtable->initialized)
5298 mono_runtime_class_init (vtable);
5300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5301 if (!method->klass->runtime_info)
5302 /* No vtable created yet */
5304 vtable = mono_class_vtable (cfg->domain, method->klass);
5307 if (!vtable->initialized)
5312 * If we're compiling for shared code
5313 * the cctor will need to be run at aot method load time, for example,
5314 * or at the end of the compilation of the inlining method.
5316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5320 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets can't inline methods taking or returning R4 values */
5321 if (mono_arch_is_soft_float ()) {
5323 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5325 for (i = 0; i < sig->param_count; ++i)
5326 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5331 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access to KLASS from METHOD requires a
 * class-initialization check to be emitted.  JIT-compiled code can skip it
 * when the vtable is already initialized; AOT code cannot rely on that.
 */
5338 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5340 if (!cfg->compile_aot) {
5342 if (vtable->initialized)
5346 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5347 if (cfg->method == method)
5351 if (!mono_class_needs_cctor_run (klass, method))
5354 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5355 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS.  BCHECK requests a bounds check.
 * Uses an x86/amd64 LEA fast path for power-of-two element sizes, and an
 * rgctx lookup for the element size with gsharedvt variable-size types.
 */
5362 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5366 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5369 if (mini_is_gsharedvt_variable_klass (klass)) {
5372 mono_class_init (klass);
5373 size = mono_class_array_element_size (klass);
5376 mult_reg = alloc_preg (cfg);
5377 array_reg = arr->dreg;
5378 index_reg = index->dreg;
5380 #if SIZEOF_REGISTER == 8
5381 /* The array reg is 64 bits but the index reg is only 32 */
5382 if (COMPILE_LLVM (cfg)) {
5384 index2_reg = index_reg;
5386 index2_reg = alloc_preg (cfg);
5387 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5390 if (index->type == STACK_I8) {
5391 index2_reg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5394 index2_reg = index_reg;
/* Bounds check against the array's max_length, raising IndexOutOfRangeException on failure */
5399 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: a single LEA covers base + index << log2(size) + vector offset */
5402 if (size == 1 || size == 2 || size == 4 || size == 8) {
5403 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5405 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5406 ins->klass = mono_class_get_element_class (klass);
5407 ins->type = STACK_MP;
5413 add_reg = alloc_ireg_mp (cfg);
5416 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime, fetch it through the rgctx */
5419 g_assert (cfg->gshared);
5420 context_used = mini_class_check_context_used (cfg, klass);
5421 g_assert (context_used);
5422 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5423 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* address = array + index * size + offsetof (MonoArray, vector) */
5427 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5428 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5429 ins->klass = mono_class_get_element_class (klass);
5430 ins->type = STACK_MP;
5431 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of the two-dimensional
 * array ARR with element type KLASS, including lower-bound adjustment and
 * range checks against both dimensions' bounds.
 */
5437 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5439 int bounds_reg = alloc_preg (cfg);
5440 int add_reg = alloc_ireg_mp (cfg);
5441 int mult_reg = alloc_preg (cfg);
5442 int mult2_reg = alloc_preg (cfg);
5443 int low1_reg = alloc_preg (cfg);
5444 int low2_reg = alloc_preg (cfg);
5445 int high1_reg = alloc_preg (cfg);
5446 int high2_reg = alloc_preg (cfg);
5447 int realidx1_reg = alloc_preg (cfg);
5448 int realidx2_reg = alloc_preg (cfg);
5449 int sum_reg = alloc_preg (cfg);
5450 int index1, index2, tmpreg;
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5457 index1 = index_ins1->dreg;
5458 index2 = index_ins2->dreg;
5460 #if SIZEOF_REGISTER == 8
5461 /* The array reg is 64 bits but the index reg is only 32 */
5462 if (COMPILE_LLVM (cfg)) {
5465 tmpreg = alloc_preg (cfg);
5466 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5468 tmpreg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5473 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5477 /* range checking */
5478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5479 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; require realidx1 < length */
5481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5482 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5483 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5485 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5486 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5487 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
5489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5490 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5491 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5493 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5495 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
5497 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5498 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5501 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5503 ins->type = STACK_MP;
5505 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the array Address () method
 * CMETHOD.  Dispatches to the fast rank-1/rank-2 emitters when possible,
 * otherwise calls the generic marshalled array-address helper.
 */
5511 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5515 MonoMethod *addr_method;
5517 MonoClass *eclass = cmethod->klass->element_class;
/* For Set (), the trailing value argument is not an index */
5519 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5522 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5524 /* emit_ldelema_2 depends on OP_LMUL */
5525 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5526 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5529 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the marshalled helper for this rank and element size */
5532 element_size = mono_class_array_element_size (eclass);
5533 addr_method = mono_marshal_get_array_address (rank, element_size);
5534 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy ()) */
5539 static MonoBreakPolicy
5540 always_insert_breakpoint (MonoMethod *method)
5542 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; embedders replace it via mono_set_break_policy () */
5545 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5548 * mono_set_break_policy:
5549 * policy_callback: the new callback function
5551 * Allow embedders to decide whether to actually obey breakpoint instructions
5552 * (both break IL instructions and Debugger.Break () method calls), for example
5553 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5554 * untrusted or semi-trusted code.
5556 * @policy_callback will be called every time a break point instruction needs to
5557 * be inserted with the method argument being the method that calls Debugger.Break()
5558 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5559 * if it wants the breakpoint to not be effective in the given method.
5560 * #MONO_BREAK_POLICY_ALWAYS is the default.
5563 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5565 if (policy_callback)
5566 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy */
5568 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Return whether a breakpoint should actually be emitted for METHOD,
 * by consulting the embedder-installed break policy callback.
 * NOTE(review): the name misspells "breakpoint", but renaming would break
 * callers elsewhere in the file.
 */
5572 should_insert_brekpoint (MonoMethod *method) {
5573 switch (break_policy_func (method)) {
5574 case MONO_BREAK_POLICY_ALWAYS:
5576 case MONO_BREAK_POLICY_NEVER:
5578 case MONO_BREAK_POLICY_ON_DBG:
5579 g_warning ("mdb no longer supported");
5582 g_warning ("Incorrect value returned from break policy callback");
5587 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Get/SetGenericValueImpl array icalls: compute the element
 * address and copy the value between the element and args [2].  IS_SET
 * selects the store direction; a write barrier is emitted when storing a
 * reference into the array.
 */
5589 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5591 MonoInst *addr, *store, *load;
5592 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5594 /* the bounds check is already done by the callers */
5595 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: value -> element */
5597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5598 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5599 if (mini_type_is_reference (fsig->params [2]))
5600 emit_write_barrier (cfg, addr, load);
/* Get: element -> value */
5602 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5603 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5610 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5612 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for an array element store: sp [0] is the array, sp [1] the
 * index, sp [2] the value. With SAFETY_CHECKS, reference-element stores go
 * through the virtual stelemref helper (which performs the covariance
 * check) and value stores get an explicit bounds check.
 * NOTE(review): this excerpt is missing some structural lines (braces,
 * iargs assignments, early returns) — code left byte-identical.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	/* Storing a non-null reference needs an array covariance check:
	 * route through the per-class virtual stelemref wrapper. */
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		/* The helper is called virtually, so its vtable slot must be set up. */
		mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		/* Both array and value must be object references on the eval stack. */
		if (sp [0]->type != STACK_OBJ)
		if (sp [2]->type != STACK_OBJ)

		return mono_emit_method_call (cfg, helper, iargs, sp [0]);

		/* gsharedvt: the element size is only known at runtime, so compute
		 * the address generically and emit a variable-sized store. */
		if (mini_is_gsharedvt_variable_klass (klass)) {
			// FIXME-VT: OP_ICONST optimization
			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			ins->opcode = OP_STOREV_MEMBASE;
		} else if (sp [1]->opcode == OP_ICONST) {
			/* Constant index: fold the address computation into the
			 * store's base+offset addressing mode. */
			int array_reg = sp [0]->dreg;
			int index_reg = sp [1]->dreg;
			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

			MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
			/* General case: compute the element address, then store through it. */
			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			/* A reference store into the heap needs a GC write barrier. */
			if (generic_class_is_reference_type (cfg, klass))
				emit_write_barrier (cfg, addr, sp [2]);
5667 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5672 eklass = mono_class_from_mono_type (fsig->params [2]);
5674 eklass = mono_class_from_mono_type (fsig->ret);
5677 return emit_array_store (cfg, eklass, args, FALSE);
5679 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5686 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5689 int param_size, return_size;
5691 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5692 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5694 if (cfg->verbose_level > 3)
5695 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5697 //Don't allow mixing reference types with value types
5698 if (param_klass->valuetype != return_klass->valuetype) {
5699 if (cfg->verbose_level > 3)
5700 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5704 if (!param_klass->valuetype) {
5705 if (cfg->verbose_level > 3)
5706 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5711 if (param_klass->has_references || return_klass->has_references)
5714 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5715 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5716 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5717 if (cfg->verbose_level > 3)
5718 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5722 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5723 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5724 if (cfg->verbose_level > 3)
5725 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5729 param_size = mono_class_value_size (param_klass, &align);
5730 return_size = mono_class_value_size (return_klass, &align);
5732 //We can do it if sizes match
5733 if (param_size == return_size) {
5734 if (cfg->verbose_level > 3)
5735 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5739 //No simple way to handle struct if sizes don't match
5740 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5741 if (cfg->verbose_level > 3)
5742 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5747 * Same reg size category.
5748 * A quick note on why we don't require widening here.
5749 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5751 * Since the source value comes from a function argument, the JIT will already have
5752 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5754 if (param_size <= 4 && return_size <= 4) {
5755 if (cfg->verbose_level > 3)
5756 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5764 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5766 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5767 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5769 //Valuetypes that are semantically equivalent or numbers than can be widened to
5770 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5773 //Arrays of valuetypes that are semantically equivalent
5774 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5781 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5783 #ifdef MONO_ARCH_SIMD_INTRINSICS
5784 MonoInst *ins = NULL;
5786 if (cfg->opt & MONO_OPT_SIMD) {
5787 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5793 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5797 emit_memory_barrier (MonoCompile *cfg, int kind)
5799 MonoInst *ins = NULL;
5800 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5801 MONO_ADD_INS (cfg->cbb, ins);
5802 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only for the LLVM backend: System.Math Sin/Cos/
 * Sqrt/Abs map to single float opcodes, and Min/Max map to CMOV-based
 * integer opcodes when MONO_OPT_CMOV is enabled. Returns the emitted
 * instruction, or NULL when CMETHOD is not handled.
 * NOTE(review): this excerpt is missing the `opcode = ...` assignments
 * for several branches — code left byte-identical.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* The LLVM backend supports these intrinsics */
	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
		} else if (strcmp (cmethod->name, "Cos") == 0) {
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
		/* Abs is only intrinsified for the double overload. */
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {

		/* Unary double intrinsic: result in a fresh float vreg. */
		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

		/* Min/Max only when conditional moves are available. */
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;

		/* Binary Min/Max: stack type follows the 32/64-bit operand width. */
		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
5870 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5872 if (cmethod->klass == mono_defaults.array_class) {
5873 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5874 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5875 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5876 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5877 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5878 return emit_array_unsafe_mov (cfg, fsig, args);
5885 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5887 MonoInst *ins = NULL;
5889 static MonoClass *runtime_helpers_class = NULL;
5890 if (! runtime_helpers_class)
5891 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5892 "System.Runtime.CompilerServices", "RuntimeHelpers");
5894 if (cmethod->klass == mono_defaults.string_class) {
5895 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5896 int dreg = alloc_ireg (cfg);
5897 int index_reg = alloc_preg (cfg);
5898 int add_reg = alloc_preg (cfg);
5900 #if SIZEOF_REGISTER == 8
5901 /* The array reg is 64 bits but the index reg is only 32 */
5902 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5904 index_reg = args [1]->dreg;
5906 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5908 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5909 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5910 add_reg = ins->dreg;
5911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5914 int mult_reg = alloc_preg (cfg);
5915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5916 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5917 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5918 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5920 type_from_op (cfg, ins, NULL, NULL);
5922 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5923 int dreg = alloc_ireg (cfg);
5924 /* Decompose later to allow more optimizations */
5925 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5926 ins->type = STACK_I4;
5927 ins->flags |= MONO_INST_FAULT;
5928 cfg->cbb->has_array_access = TRUE;
5929 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5934 } else if (cmethod->klass == mono_defaults.object_class) {
5936 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5937 int dreg = alloc_ireg_ref (cfg);
5938 int vt_reg = alloc_preg (cfg);
5939 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5940 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5941 type_from_op (cfg, ins, NULL, NULL);
5944 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5945 int dreg = alloc_ireg (cfg);
5946 int t1 = alloc_ireg (cfg);
5948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5949 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5950 ins->type = STACK_I4;
5953 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5954 MONO_INST_NEW (cfg, ins, OP_NOP);
5955 MONO_ADD_INS (cfg->cbb, ins);
5959 } else if (cmethod->klass == mono_defaults.array_class) {
5960 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5961 return emit_array_generic_access (cfg, fsig, args, FALSE);
5962 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5963 return emit_array_generic_access (cfg, fsig, args, TRUE);
5965 #ifndef MONO_BIG_ARRAYS
5967 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5970 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5971 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5972 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5973 int dreg = alloc_ireg (cfg);
5974 int bounds_reg = alloc_ireg_mp (cfg);
5975 MonoBasicBlock *end_bb, *szarray_bb;
5976 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5978 NEW_BBLOCK (cfg, end_bb);
5979 NEW_BBLOCK (cfg, szarray_bb);
5981 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5982 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5984 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5985 /* Non-szarray case */
5987 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5988 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5991 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5992 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5993 MONO_START_BB (cfg, szarray_bb);
5996 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5997 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5999 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6000 MONO_START_BB (cfg, end_bb);
6002 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6003 ins->type = STACK_I4;
6009 if (cmethod->name [0] != 'g')
6012 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6013 int dreg = alloc_ireg (cfg);
6014 int vtable_reg = alloc_preg (cfg);
6015 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6016 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6017 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6018 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6019 type_from_op (cfg, ins, NULL, NULL);
6022 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6023 int dreg = alloc_ireg (cfg);
6025 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6026 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6027 type_from_op (cfg, ins, NULL, NULL);
6032 } else if (cmethod->klass == runtime_helpers_class) {
6034 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6035 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6039 } else if (cmethod->klass == mono_defaults.thread_class) {
6040 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6041 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6042 MONO_ADD_INS (cfg->cbb, ins);
6044 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6045 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6046 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6048 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6050 if (fsig->params [0]->type == MONO_TYPE_I1)
6051 opcode = OP_LOADI1_MEMBASE;
6052 else if (fsig->params [0]->type == MONO_TYPE_U1)
6053 opcode = OP_LOADU1_MEMBASE;
6054 else if (fsig->params [0]->type == MONO_TYPE_I2)
6055 opcode = OP_LOADI2_MEMBASE;
6056 else if (fsig->params [0]->type == MONO_TYPE_U2)
6057 opcode = OP_LOADU2_MEMBASE;
6058 else if (fsig->params [0]->type == MONO_TYPE_I4)
6059 opcode = OP_LOADI4_MEMBASE;
6060 else if (fsig->params [0]->type == MONO_TYPE_U4)
6061 opcode = OP_LOADU4_MEMBASE;
6062 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6063 opcode = OP_LOADI8_MEMBASE;
6064 else if (fsig->params [0]->type == MONO_TYPE_R4)
6065 opcode = OP_LOADR4_MEMBASE;
6066 else if (fsig->params [0]->type == MONO_TYPE_R8)
6067 opcode = OP_LOADR8_MEMBASE;
6068 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6069 opcode = OP_LOAD_MEMBASE;
6072 MONO_INST_NEW (cfg, ins, opcode);
6073 ins->inst_basereg = args [0]->dreg;
6074 ins->inst_offset = 0;
6075 MONO_ADD_INS (cfg->cbb, ins);
6077 switch (fsig->params [0]->type) {
6084 ins->dreg = mono_alloc_ireg (cfg);
6085 ins->type = STACK_I4;
6089 ins->dreg = mono_alloc_lreg (cfg);
6090 ins->type = STACK_I8;
6094 ins->dreg = mono_alloc_ireg (cfg);
6095 #if SIZEOF_REGISTER == 8
6096 ins->type = STACK_I8;
6098 ins->type = STACK_I4;
6103 ins->dreg = mono_alloc_freg (cfg);
6104 ins->type = STACK_R8;
6107 g_assert (mini_type_is_reference (fsig->params [0]));
6108 ins->dreg = mono_alloc_ireg_ref (cfg);
6109 ins->type = STACK_OBJ;
6113 if (opcode == OP_LOADI8_MEMBASE)
6114 ins = mono_decompose_opcode (cfg, ins);
6116 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6120 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6122 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6124 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6125 opcode = OP_STOREI1_MEMBASE_REG;
6126 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6127 opcode = OP_STOREI2_MEMBASE_REG;
6128 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6129 opcode = OP_STOREI4_MEMBASE_REG;
6130 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6131 opcode = OP_STOREI8_MEMBASE_REG;
6132 else if (fsig->params [0]->type == MONO_TYPE_R4)
6133 opcode = OP_STORER4_MEMBASE_REG;
6134 else if (fsig->params [0]->type == MONO_TYPE_R8)
6135 opcode = OP_STORER8_MEMBASE_REG;
6136 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6137 opcode = OP_STORE_MEMBASE_REG;
6140 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6142 MONO_INST_NEW (cfg, ins, opcode);
6143 ins->sreg1 = args [1]->dreg;
6144 ins->inst_destbasereg = args [0]->dreg;
6145 ins->inst_offset = 0;
6146 MONO_ADD_INS (cfg->cbb, ins);
6148 if (opcode == OP_STOREI8_MEMBASE_REG)
6149 ins = mono_decompose_opcode (cfg, ins);
6154 } else if (cmethod->klass->image == mono_defaults.corlib &&
6155 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6156 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6159 #if SIZEOF_REGISTER == 8
6160 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6161 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6162 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6163 ins->dreg = mono_alloc_preg (cfg);
6164 ins->sreg1 = args [0]->dreg;
6165 ins->type = STACK_I8;
6166 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6167 MONO_ADD_INS (cfg->cbb, ins);
6171 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6173 /* 64 bit reads are already atomic */
6174 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6175 load_ins->dreg = mono_alloc_preg (cfg);
6176 load_ins->inst_basereg = args [0]->dreg;
6177 load_ins->inst_offset = 0;
6178 load_ins->type = STACK_I8;
6179 MONO_ADD_INS (cfg->cbb, load_ins);
6181 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6188 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6189 MonoInst *ins_iconst;
6192 if (fsig->params [0]->type == MONO_TYPE_I4) {
6193 opcode = OP_ATOMIC_ADD_I4;
6194 cfg->has_atomic_add_i4 = TRUE;
6196 #if SIZEOF_REGISTER == 8
6197 else if (fsig->params [0]->type == MONO_TYPE_I8)
6198 opcode = OP_ATOMIC_ADD_I8;
6201 if (!mono_arch_opcode_supported (opcode))
6203 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6204 ins_iconst->inst_c0 = 1;
6205 ins_iconst->dreg = mono_alloc_ireg (cfg);
6206 MONO_ADD_INS (cfg->cbb, ins_iconst);
6208 MONO_INST_NEW (cfg, ins, opcode);
6209 ins->dreg = mono_alloc_ireg (cfg);
6210 ins->inst_basereg = args [0]->dreg;
6211 ins->inst_offset = 0;
6212 ins->sreg2 = ins_iconst->dreg;
6213 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6214 MONO_ADD_INS (cfg->cbb, ins);
6216 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6217 MonoInst *ins_iconst;
6220 if (fsig->params [0]->type == MONO_TYPE_I4) {
6221 opcode = OP_ATOMIC_ADD_I4;
6222 cfg->has_atomic_add_i4 = TRUE;
6224 #if SIZEOF_REGISTER == 8
6225 else if (fsig->params [0]->type == MONO_TYPE_I8)
6226 opcode = OP_ATOMIC_ADD_I8;
6229 if (!mono_arch_opcode_supported (opcode))
6231 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6232 ins_iconst->inst_c0 = -1;
6233 ins_iconst->dreg = mono_alloc_ireg (cfg);
6234 MONO_ADD_INS (cfg->cbb, ins_iconst);
6236 MONO_INST_NEW (cfg, ins, opcode);
6237 ins->dreg = mono_alloc_ireg (cfg);
6238 ins->inst_basereg = args [0]->dreg;
6239 ins->inst_offset = 0;
6240 ins->sreg2 = ins_iconst->dreg;
6241 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6242 MONO_ADD_INS (cfg->cbb, ins);
6244 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6247 if (fsig->params [0]->type == MONO_TYPE_I4) {
6248 opcode = OP_ATOMIC_ADD_I4;
6249 cfg->has_atomic_add_i4 = TRUE;
6251 #if SIZEOF_REGISTER == 8
6252 else if (fsig->params [0]->type == MONO_TYPE_I8)
6253 opcode = OP_ATOMIC_ADD_I8;
6256 if (!mono_arch_opcode_supported (opcode))
6258 MONO_INST_NEW (cfg, ins, opcode);
6259 ins->dreg = mono_alloc_ireg (cfg);
6260 ins->inst_basereg = args [0]->dreg;
6261 ins->inst_offset = 0;
6262 ins->sreg2 = args [1]->dreg;
6263 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6264 MONO_ADD_INS (cfg->cbb, ins);
6267 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6268 MonoInst *f2i = NULL, *i2f;
6269 guint32 opcode, f2i_opcode, i2f_opcode;
6270 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6271 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6273 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6274 fsig->params [0]->type == MONO_TYPE_R4) {
6275 opcode = OP_ATOMIC_EXCHANGE_I4;
6276 f2i_opcode = OP_MOVE_F_TO_I4;
6277 i2f_opcode = OP_MOVE_I4_TO_F;
6278 cfg->has_atomic_exchange_i4 = TRUE;
6280 #if SIZEOF_REGISTER == 8
6282 fsig->params [0]->type == MONO_TYPE_I8 ||
6283 fsig->params [0]->type == MONO_TYPE_R8 ||
6284 fsig->params [0]->type == MONO_TYPE_I) {
6285 opcode = OP_ATOMIC_EXCHANGE_I8;
6286 f2i_opcode = OP_MOVE_F_TO_I8;
6287 i2f_opcode = OP_MOVE_I8_TO_F;
6290 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6291 opcode = OP_ATOMIC_EXCHANGE_I4;
6292 cfg->has_atomic_exchange_i4 = TRUE;
6298 if (!mono_arch_opcode_supported (opcode))
6302 /* TODO: Decompose these opcodes instead of bailing here. */
6303 if (COMPILE_SOFT_FLOAT (cfg))
6306 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6307 f2i->dreg = mono_alloc_ireg (cfg);
6308 f2i->sreg1 = args [1]->dreg;
6309 if (f2i_opcode == OP_MOVE_F_TO_I4)
6310 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6311 MONO_ADD_INS (cfg->cbb, f2i);
6314 MONO_INST_NEW (cfg, ins, opcode);
6315 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6316 ins->inst_basereg = args [0]->dreg;
6317 ins->inst_offset = 0;
6318 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6319 MONO_ADD_INS (cfg->cbb, ins);
6321 switch (fsig->params [0]->type) {
6323 ins->type = STACK_I4;
6326 ins->type = STACK_I8;
6329 #if SIZEOF_REGISTER == 8
6330 ins->type = STACK_I8;
6332 ins->type = STACK_I4;
6337 ins->type = STACK_R8;
6340 g_assert (mini_type_is_reference (fsig->params [0]));
6341 ins->type = STACK_OBJ;
6346 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6347 i2f->dreg = mono_alloc_freg (cfg);
6348 i2f->sreg1 = ins->dreg;
6349 i2f->type = STACK_R8;
6350 if (i2f_opcode == OP_MOVE_I4_TO_F)
6351 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6352 MONO_ADD_INS (cfg->cbb, i2f);
6357 if (cfg->gen_write_barriers && is_ref)
6358 emit_write_barrier (cfg, args [0], args [1]);
6360 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6361 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6362 guint32 opcode, f2i_opcode, i2f_opcode;
6363 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6364 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6366 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6367 fsig->params [1]->type == MONO_TYPE_R4) {
6368 opcode = OP_ATOMIC_CAS_I4;
6369 f2i_opcode = OP_MOVE_F_TO_I4;
6370 i2f_opcode = OP_MOVE_I4_TO_F;
6371 cfg->has_atomic_cas_i4 = TRUE;
6373 #if SIZEOF_REGISTER == 8
6375 fsig->params [1]->type == MONO_TYPE_I8 ||
6376 fsig->params [1]->type == MONO_TYPE_R8 ||
6377 fsig->params [1]->type == MONO_TYPE_I) {
6378 opcode = OP_ATOMIC_CAS_I8;
6379 f2i_opcode = OP_MOVE_F_TO_I8;
6380 i2f_opcode = OP_MOVE_I8_TO_F;
6383 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6384 opcode = OP_ATOMIC_CAS_I4;
6385 cfg->has_atomic_cas_i4 = TRUE;
6391 if (!mono_arch_opcode_supported (opcode))
6395 /* TODO: Decompose these opcodes instead of bailing here. */
6396 if (COMPILE_SOFT_FLOAT (cfg))
6399 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6400 f2i_new->dreg = mono_alloc_ireg (cfg);
6401 f2i_new->sreg1 = args [1]->dreg;
6402 if (f2i_opcode == OP_MOVE_F_TO_I4)
6403 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6404 MONO_ADD_INS (cfg->cbb, f2i_new);
6406 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6407 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6408 f2i_cmp->sreg1 = args [2]->dreg;
6409 if (f2i_opcode == OP_MOVE_F_TO_I4)
6410 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6411 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6414 MONO_INST_NEW (cfg, ins, opcode);
6415 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6416 ins->sreg1 = args [0]->dreg;
6417 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6418 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6419 MONO_ADD_INS (cfg->cbb, ins);
6421 switch (fsig->params [1]->type) {
6423 ins->type = STACK_I4;
6426 ins->type = STACK_I8;
6429 #if SIZEOF_REGISTER == 8
6430 ins->type = STACK_I8;
6432 ins->type = STACK_I4;
6436 ins->type = cfg->r4_stack_type;
6439 ins->type = STACK_R8;
6442 g_assert (mini_type_is_reference (fsig->params [1]));
6443 ins->type = STACK_OBJ;
6448 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6449 i2f->dreg = mono_alloc_freg (cfg);
6450 i2f->sreg1 = ins->dreg;
6451 i2f->type = STACK_R8;
6452 if (i2f_opcode == OP_MOVE_I4_TO_F)
6453 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6454 MONO_ADD_INS (cfg->cbb, i2f);
6459 if (cfg->gen_write_barriers && is_ref)
6460 emit_write_barrier (cfg, args [0], args [1]);
6462 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6463 fsig->params [1]->type == MONO_TYPE_I4) {
6464 MonoInst *cmp, *ceq;
6466 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6469 /* int32 r = CAS (location, value, comparand); */
6470 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6471 ins->dreg = alloc_ireg (cfg);
6472 ins->sreg1 = args [0]->dreg;
6473 ins->sreg2 = args [1]->dreg;
6474 ins->sreg3 = args [2]->dreg;
6475 ins->type = STACK_I4;
6476 MONO_ADD_INS (cfg->cbb, ins);
6478 /* bool result = r == comparand; */
6479 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6480 cmp->sreg1 = ins->dreg;
6481 cmp->sreg2 = args [2]->dreg;
6482 cmp->type = STACK_I4;
6483 MONO_ADD_INS (cfg->cbb, cmp);
6485 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6486 ceq->dreg = alloc_ireg (cfg);
6487 ceq->type = STACK_I4;
6488 MONO_ADD_INS (cfg->cbb, ceq);
6490 /* *success = result; */
6491 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6493 cfg->has_atomic_cas_i4 = TRUE;
6495 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6496 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6500 } else if (cmethod->klass->image == mono_defaults.corlib &&
6501 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6502 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6505 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6507 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6508 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6510 if (fsig->params [0]->type == MONO_TYPE_I1)
6511 opcode = OP_ATOMIC_LOAD_I1;
6512 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6513 opcode = OP_ATOMIC_LOAD_U1;
6514 else if (fsig->params [0]->type == MONO_TYPE_I2)
6515 opcode = OP_ATOMIC_LOAD_I2;
6516 else if (fsig->params [0]->type == MONO_TYPE_U2)
6517 opcode = OP_ATOMIC_LOAD_U2;
6518 else if (fsig->params [0]->type == MONO_TYPE_I4)
6519 opcode = OP_ATOMIC_LOAD_I4;
6520 else if (fsig->params [0]->type == MONO_TYPE_U4)
6521 opcode = OP_ATOMIC_LOAD_U4;
6522 else if (fsig->params [0]->type == MONO_TYPE_R4)
6523 opcode = OP_ATOMIC_LOAD_R4;
6524 else if (fsig->params [0]->type == MONO_TYPE_R8)
6525 opcode = OP_ATOMIC_LOAD_R8;
6526 #if SIZEOF_REGISTER == 8
6527 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6528 opcode = OP_ATOMIC_LOAD_I8;
6529 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6530 opcode = OP_ATOMIC_LOAD_U8;
6532 else if (fsig->params [0]->type == MONO_TYPE_I)
6533 opcode = OP_ATOMIC_LOAD_I4;
6534 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6535 opcode = OP_ATOMIC_LOAD_U4;
6539 if (!mono_arch_opcode_supported (opcode))
6542 MONO_INST_NEW (cfg, ins, opcode);
6543 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6544 ins->sreg1 = args [0]->dreg;
6545 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6546 MONO_ADD_INS (cfg->cbb, ins);
6548 switch (fsig->params [0]->type) {
6549 case MONO_TYPE_BOOLEAN:
6556 ins->type = STACK_I4;
6560 ins->type = STACK_I8;
6564 #if SIZEOF_REGISTER == 8
6565 ins->type = STACK_I8;
6567 ins->type = STACK_I4;
6571 ins->type = cfg->r4_stack_type;
6574 ins->type = STACK_R8;
6577 g_assert (mini_type_is_reference (fsig->params [0]));
6578 ins->type = STACK_OBJ;
6584 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6586 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6588 if (fsig->params [0]->type == MONO_TYPE_I1)
6589 opcode = OP_ATOMIC_STORE_I1;
6590 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6591 opcode = OP_ATOMIC_STORE_U1;
6592 else if (fsig->params [0]->type == MONO_TYPE_I2)
6593 opcode = OP_ATOMIC_STORE_I2;
6594 else if (fsig->params [0]->type == MONO_TYPE_U2)
6595 opcode = OP_ATOMIC_STORE_U2;
6596 else if (fsig->params [0]->type == MONO_TYPE_I4)
6597 opcode = OP_ATOMIC_STORE_I4;
6598 else if (fsig->params [0]->type == MONO_TYPE_U4)
6599 opcode = OP_ATOMIC_STORE_U4;
6600 else if (fsig->params [0]->type == MONO_TYPE_R4)
6601 opcode = OP_ATOMIC_STORE_R4;
6602 else if (fsig->params [0]->type == MONO_TYPE_R8)
6603 opcode = OP_ATOMIC_STORE_R8;
6604 #if SIZEOF_REGISTER == 8
6605 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6606 opcode = OP_ATOMIC_STORE_I8;
6607 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6608 opcode = OP_ATOMIC_STORE_U8;
6610 else if (fsig->params [0]->type == MONO_TYPE_I)
6611 opcode = OP_ATOMIC_STORE_I4;
6612 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6613 opcode = OP_ATOMIC_STORE_U4;
6617 if (!mono_arch_opcode_supported (opcode))
6620 MONO_INST_NEW (cfg, ins, opcode);
6621 ins->dreg = args [0]->dreg;
6622 ins->sreg1 = args [1]->dreg;
6623 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6624 MONO_ADD_INS (cfg->cbb, ins);
6626 if (cfg->gen_write_barriers && is_ref)
6627 emit_write_barrier (cfg, args [0], args [1]);
6633 } else if (cmethod->klass->image == mono_defaults.corlib &&
6634 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6635 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6636 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6637 if (should_insert_brekpoint (cfg->method)) {
6638 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6640 MONO_INST_NEW (cfg, ins, OP_NOP);
6641 MONO_ADD_INS (cfg->cbb, ins);
6645 } else if (cmethod->klass->image == mono_defaults.corlib &&
6646 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6647 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6648 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6650 EMIT_NEW_ICONST (cfg, ins, 1);
6652 EMIT_NEW_ICONST (cfg, ins, 0);
6655 } else if (cmethod->klass->image == mono_defaults.corlib &&
6656 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6657 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6658 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6659 /* No stack walks are current available, so implement this as an intrinsic */
6660 MonoInst *assembly_ins;
6662 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6663 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6666 } else if (cmethod->klass == mono_defaults.math_class) {
6668 * There is general branchless code for Min/Max, but it does not work for
6670 * http://everything2.com/?node_id=1051618
6672 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6673 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6674 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6675 !strcmp (cmethod->klass->name, "Selector")) ||
6676 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6677 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6678 !strcmp (cmethod->klass->name, "Selector"))
6680 if (cfg->backend->have_objc_get_selector &&
6681 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6682 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6685 MonoJumpInfoToken *ji;
6688 cfg->disable_llvm = TRUE;
6690 if (args [0]->opcode == OP_GOT_ENTRY) {
6691 pi = args [0]->inst_p1;
6692 g_assert (pi->opcode == OP_PATCH_INFO);
6693 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6696 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6697 ji = args [0]->inst_p0;
6700 NULLIFY_INS (args [0]);
6703 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6704 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6705 ins->dreg = mono_alloc_ireg (cfg);
6707 ins->inst_p0 = mono_string_to_utf8 (s);
6708 MONO_ADD_INS (cfg->cbb, ins);
6713 #ifdef MONO_ARCH_SIMD_INTRINSICS
6714 if (cfg->opt & MONO_OPT_SIMD) {
6715 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6721 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6725 if (COMPILE_LLVM (cfg)) {
6726 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6731 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6735 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect well-known runtime calls to specialized implementations.
 * Currently handles System.String::InternalAllocateStr: when allocation
 * profiling is off and shared (domain-neutral) code is not being generated,
 * the call is replaced with a direct call to the GC's managed allocator,
 * passing the String vtable and the requested length.
 */
6738 inline static MonoInst*
6739 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6740 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6742 if (method->klass == mono_defaults.string_class) {
6743 /* managed string allocation support */
6744 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6745 MonoInst *iargs [2];
6746 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6747 MonoMethod *managed_alloc = NULL;
6749 g_assert (vtable); /* Should not fail since it is System.String */
6750 #ifndef MONO_CROSS_COMPILE
6751 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* iargs [0] = vtable, iargs [1] = length (the original call's first argument) */
6755 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6756 iargs [1] = args [0];
6757 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   When inlining, store the caller's argument values (on the evaluation
 * stack SP) into freshly created OP_LOCAL vars which become the inlinee's
 * cfg->args. The implicit 'this' (when sig->hasthis) takes its type from
 * the stack entry rather than the signature.
 */
6764 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6766 MonoInst *store, *temp;
6769 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6770 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6773 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6774 * would be different than the MonoInst's used to represent arguments, and
6775 * the ldelema implementation can't deal with that.
6776 * Solution: When ldelema is used on an inline argument, create a var for
6777 * it, emit ldelema on that var, and emit the saving code below in
6778 * inline_method () if needed.
6780 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6781 cfg->args [i] = temp;
6782 /* This uses cfg->args [i] which is set by the preceding line */
6783 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6784 store->cil_code = sp [0]->cil_code;
6789 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6790 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6792 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. The limit string is looked up once and cached in
 * a static. Returns non-zero when inlining is allowed.
 */
6794 check_inline_called_method_name_limit (MonoMethod *called_method)
6797 static const char *limit = NULL;
6799 if (limit == NULL) {
6800 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6802 if (limit_string != NULL)
6803 limit = limit_string;
6808 if (limit [0] != '\0') {
6809 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix comparison against the configured limit */
6811 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6812 g_free (called_method_name);
6814 //return (strncmp_result <= 0);
6815 return (strncmp_result == 0);
6822 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit () but
 * for the caller: only inline inside methods whose full name starts with
 * the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
6824 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6827 static const char *limit = NULL;
6829 if (limit == NULL) {
6830 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6831 if (limit_string != NULL) {
6832 limit = limit_string;
6838 if (limit [0] != '\0') {
6839 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix comparison against the configured limit */
6841 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6842 g_free (caller_method_name);
6844 //return (strncmp_result <= 0);
6845 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero value of RTYPE:
 * NULL for references/pointers, 0 for integers, 0.0 for R4/R8 (loaded
 * from static constants), and VZERO for value types (including generic
 * instances and type vars known to be valuetypes).
 */
6853 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6855 static double r8_0 = 0.0;
6856 static float r4_0 = 0.0;
6860 rtype = mini_get_underlying_type (rtype);
6864 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6865 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6866 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6867 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6868 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6869 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp: keep single-precision as STACK_R4 instead of widening to R8 */
6870 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6871 ins->type = STACK_R4;
6872 ins->inst_p0 = (void*)&r4_0;
6874 MONO_ADD_INS (cfg->cbb, ins);
6875 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6876 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6877 ins->type = STACK_R8;
6878 ins->inst_p0 = (void*)&r8_0;
6880 MONO_ADD_INS (cfg->cbb, ins);
6881 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6882 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6883 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6884 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6885 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* fallback: treat as a reference/pointer */
6887 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initialization for DREG. The dummy opcodes keep the
 * IR well-formed (every vreg has a def) without emitting real stores;
 * same type dispatch as emit_init_rvar (), to which it falls back for
 * types with no dummy variant.
 */
6892 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6896 rtype = mini_get_underlying_type (rtype);
6900 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6901 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6902 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6903 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6904 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6905 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6906 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6907 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6908 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6909 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6910 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6911 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6912 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6913 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* no dummy variant for this type: emit a real init */
6915 emit_init_rvar (cfg, dreg, rtype);
6919 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6921 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6923 MonoInst *var = cfg->locals [local];
6924 if (COMPILE_SOFT_FLOAT (cfg)) {
/* soft-float: init a scratch reg, then store it into the local */
6926 int reg = alloc_dreg (cfg, var->type);
6927 emit_init_rvar (cfg, reg, type);
6928 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6931 emit_init_rvar (cfg, var->dreg, type);
6933 emit_dummy_init_rvar (cfg, var->dreg, type);
6940 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point. Saves the relevant cfg
 * state (locals, args, bblock maps, generic context, ...), recursively
 * runs mono_method_to_ir () on the callee between fresh start/end
 * bblocks, then either commits (linking and merging the new bblocks into
 * the caller's CFG) or aborts (restoring cfg->cbb) based on the returned
 * cost. Returns the cost (> 0) on success, 0 on failure.
 * INLINE_ALWAYS forces inlining regardless of cost and turns callee load
 * errors into compile exceptions.
 */
6943 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6944 guchar *ip, guint real_offset, gboolean inline_always)
6946 MonoInst *ins, *rvar = NULL;
6947 MonoMethodHeader *cheader;
6948 MonoBasicBlock *ebblock, *sbblock;
6950 MonoMethod *prev_inlined_method;
6951 MonoInst **prev_locals, **prev_args;
6952 MonoType **prev_arg_types;
6953 guint prev_real_offset;
6954 GHashTable *prev_cbb_hash;
6955 MonoBasicBlock **prev_cil_offset_to_bb;
6956 MonoBasicBlock *prev_cbb;
6957 unsigned char* prev_cil_start;
6958 guint32 prev_cil_offset_to_bb_len;
6959 MonoMethod *prev_current_method;
6960 MonoGenericContext *prev_generic_context;
6961 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6963 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6965 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6966 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6969 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6970 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6975 fsig = mono_method_signature (cmethod);
6977 if (cfg->verbose_level > 2)
6978 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6980 if (!cmethod->inline_info) {
6981 cfg->stat_inlineable_methods++;
6982 cmethod->inline_info = 1;
6985 /* allocate local variables */
6986 cheader = mono_method_get_header (cmethod);
6988 if (cheader == NULL || mono_loader_get_last_error ()) {
6989 MonoLoaderError *error = mono_loader_get_last_error ();
6992 mono_metadata_free_mh (cheader);
6993 if (inline_always && error)
6994 mono_cfg_set_exception (cfg, error->exception_type);
6996 mono_loader_clear_error ();
7000 /*Must verify before creating locals as it can cause the JIT to assert.*/
7001 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7002 mono_metadata_free_mh (cheader);
7006 /* allocate space to store the return value */
7007 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7008 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7011 prev_locals = cfg->locals;
7012 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7013 for (i = 0; i < cheader->num_locals; ++i)
7014 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7016 /* allocate start and end blocks */
7017 /* This is needed so if the inline is aborted, we can clean up */
7018 NEW_BBLOCK (cfg, sbblock);
7019 sbblock->real_offset = real_offset;
7021 NEW_BBLOCK (cfg, ebblock);
7022 ebblock->block_num = cfg->num_bblocks++;
7023 ebblock->real_offset = real_offset;
/* save cfg state that mono_method_to_ir () will clobber */
7025 prev_args = cfg->args;
7026 prev_arg_types = cfg->arg_types;
7027 prev_inlined_method = cfg->inlined_method;
7028 cfg->inlined_method = cmethod;
7029 cfg->ret_var_set = FALSE;
7030 cfg->inline_depth ++;
7031 prev_real_offset = cfg->real_offset;
7032 prev_cbb_hash = cfg->cbb_hash;
7033 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7034 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7035 prev_cil_start = cfg->cil_start;
7036 prev_cbb = cfg->cbb;
7037 prev_current_method = cfg->current_method;
7038 prev_generic_context = cfg->generic_context;
7039 prev_ret_var_set = cfg->ret_var_set;
7040 prev_disable_inline = cfg->disable_inline;
7042 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7045 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
7047 ret_var_set = cfg->ret_var_set;
/* restore the saved cfg state */
7049 cfg->inlined_method = prev_inlined_method;
7050 cfg->real_offset = prev_real_offset;
7051 cfg->cbb_hash = prev_cbb_hash;
7052 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7053 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7054 cfg->cil_start = prev_cil_start;
7055 cfg->locals = prev_locals;
7056 cfg->args = prev_args;
7057 cfg->arg_types = prev_arg_types;
7058 cfg->current_method = prev_current_method;
7059 cfg->generic_context = prev_generic_context;
7060 cfg->ret_var_set = prev_ret_var_set;
7061 cfg->disable_inline = prev_disable_inline;
7062 cfg->inline_depth --;
/* commit the inline when the cost is acceptable, forced, or the callee is marked AggressiveInlining */
7064 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7065 if (cfg->verbose_level > 2)
7066 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7068 cfg->stat_inlined_methods++;
7070 /* always add some code to avoid block split failures */
7071 MONO_INST_NEW (cfg, ins, OP_NOP);
7072 MONO_ADD_INS (prev_cbb, ins);
7074 prev_cbb->next_bb = sbblock;
7075 link_bblock (cfg, prev_cbb, sbblock);
7078 * Get rid of the begin and end bblocks if possible to aid local
7081 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7083 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7084 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7086 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7087 MonoBasicBlock *prev = ebblock->in_bb [0];
7088 mono_merge_basic_blocks (cfg, prev, ebblock);
7090 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7091 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7092 cfg->cbb = prev_cbb;
7096 * It's possible that the rvar is set in some prev bblock, but not in others.
7102 for (i = 0; i < ebblock->in_count; ++i) {
7103 bb = ebblock->in_bb [i];
7105 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7108 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7118 * If the inlined method contains only a throw, then the ret var is not
7119 * set, so set it to a dummy value.
7122 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7124 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7127 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: discard the newly created bblocks and clear any exception state */
7130 if (cfg->verbose_level > 2)
7131 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7132 cfg->exception_type = MONO_EXCEPTION_NONE;
7133 mono_loader_clear_error ();
7135 /* This gets rid of the newly added bblocks */
7136 cfg->cbb = prev_cbb;
7138 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7143 * Some of these comments may well be out-of-date.
7144 * Design decisions: we do a single pass over the IL code (and we do bblock
7145 * splitting/merging in the few cases when it's required: a back jump to an IL
7146 * address that was not already seen as bblock starting point).
7147 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7148 * Complex operations are decomposed in simpler ones right away. We need to let the
7149 * arch-specific code peek and poke inside this process somehow (except when the
7150 * optimizations can take advantage of the full semantic info of coarse opcodes).
7151 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7152 * MonoInst->opcode initially is the IL opcode or some simplification of that
7153 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7154 * opcode with value bigger than OP_LAST.
7155 * At this point the IR can be handed over to an interpreter, a dumb code generator
7156 * or to the optimizing code generator that will translate it to SSA form.
7158 * Profiling directed optimizations.
7159 * We may compile by default with few or no optimizations and instrument the code
7160 * or the user may indicate what methods to optimize the most either in a config file
7161 * or through repeated runs where the compiler applies offline the optimizations to
7162 * each method and then decides if it was worth it.
7165 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7166 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7167 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7168 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7169 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7170 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7171 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7172 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7174 /* offset from br.s -> br like opcodes */
7175 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
7178 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7180 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7182 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) once, decoding each opcode's operand
 * kind, and create bblocks (via GET_BBLOCK) at every branch target and at
 * the instruction following each branch/switch. Additionally, the bblock
 * containing a CEE_THROW is marked out_of_line so it can be laid out cold.
 */
7186 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7188 unsigned char *ip = start;
7189 unsigned char *target;
7192 MonoBasicBlock *bblock;
7193 const MonoOpcode *opcode;
7196 cli_addr = ip - start;
7197 i = mono_opcode_value ((const guint8 **)&ip, end);
7200 opcode = &mono_opcodes [i];
7201 switch (opcode->argument) {
7202 case MonoInlineNone:
7205 case MonoInlineString:
7206 case MonoInlineType:
7207 case MonoInlineField:
7208 case MonoInlineMethod:
7211 case MonoShortInlineR:
7218 case MonoShortInlineVar:
7219 case MonoShortInlineI:
7222 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction (opcode + operand = 2 bytes) */
7223 target = start + cli_addr + 2 + (signed char)ip [1];
7224 GET_BBLOCK (cfg, bblock, target);
7227 GET_BBLOCK (cfg, bblock, ip);
7229 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction (opcode + operand = 5 bytes) */
7230 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7231 GET_BBLOCK (cfg, bblock, target);
7234 GET_BBLOCK (cfg, bblock, ip);
7236 case MonoInlineSwitch: {
7237 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
7240 cli_addr += 5 + 4 * n;
7241 target = start + cli_addr;
7242 GET_BBLOCK (cfg, bblock, target);
7244 for (j = 0; j < n; ++j) {
7245 target = start + cli_addr + (gint32)read32 (ip);
7246 GET_BBLOCK (cfg, bblock, target);
7256 g_assert_not_reached ();
7259 if (i == CEE_THROW) {
7260 unsigned char *bb_start = ip - 1;
7262 /* Find the start of the bblock containing the throw */
7264 while ((bb_start >= start) && !bblock) {
7265 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7269 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod. For wrapper methods the method is
 * looked up in the wrapper data (and inflated with CONTEXT when present);
 * otherwise a normal metadata lookup is done. Open constructed types are
 * allowed (see mini_get_method () for the checked variant).
 */
7279 static inline MonoMethod *
7280 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7284 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7285 method = mono_method_get_wrapper_data (m, token);
7288 method = mono_class_inflate_generic_method_checked (method, context, &error);
7289 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7292 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * (gshared) code, a method on an open constructed type is rejected.
 */
7298 static inline MonoMethod *
7299 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7301 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7303 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass, honoring wrapper data and inflating
 * with CONTEXT, then initialize the class. Lookup errors are currently
 * swallowed (see FIXME below).
 */
7309 static inline MonoClass*
7310 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7315 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7316 klass = mono_method_get_wrapper_data (method, token);
7318 klass = mono_class_inflate_generic_class (klass, context);
7320 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7321 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7324 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature, from wrapper data for wrapper
 * methods or by parsing metadata otherwise, then inflate it with CONTEXT.
 */
7328 static inline MonoMethodSignature*
7329 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7331 MonoMethodSignature *fsig;
7333 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7334 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7336 fsig = mono_metadata_parse_signature (method->klass->image, token);
7340 fsig = mono_inflate_generic_signature(fsig, context, &error);
7342 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (and cache in a static) the SecurityManager::ThrowException
 * method used by the CoreCLR security checks below.
 */
7348 throw_exception (void)
7350 static MonoMethod *method = NULL;
7353 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7354 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager::ThrowException (EX), i.e. code that
 * throws the given pre-constructed exception object at runtime.
 */
7361 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7363 MonoMethod *thrower = throw_exception ();
7366 EMIT_NEW_PCONST (cfg, args [0], ex);
7367 mono_emit_method_call (cfg, thrower, args, NULL);
7371 * Return the original method if a wrapper is specified. We can only access
7372 * the custom attributes from the original method.
7375 get_original_method (MonoMethod *method)
7377 if (method->wrapper_type == MONO_WRAPPER_NONE)
7380 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7381 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7384 /* in other cases we need to find the original method */
7385 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER may not access FIELD, emit code that
 * throws the returned security exception at runtime.
 */
7389 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7391 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7392 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7394 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that
 * throws the returned security exception at runtime.
 */
7398 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7400 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7401 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7403 emit_throw_exception (cfg, ex);
7407 * Check that the IL instructions at ip are the array initialization
7408 * sequence and return the pointer to the data and the size.
7411 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7414 * newarr[System.Int32]
7416 * ldtoken field valuetype ...
7417 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* match dup; ldtoken <field>; call ... — ip [5] == 0x4 checks the token's table byte */
7419 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7421 guint32 token = read32 (ip + 7);
7422 guint32 field_token = read32 (ip + 2);
7423 guint32 field_index = field_token & 0xffffff;
7425 const char *data_ptr;
7427 MonoMethod *cmethod;
7428 MonoClass *dummy_class;
7429 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7433 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7437 *out_field_token = field_token;
7439 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the real RuntimeHelpers.InitializeArray from corlib */
7442 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7444 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7445 case MONO_TYPE_BOOLEAN:
7449 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7450 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7451 case MONO_TYPE_CHAR:
7468 if (size > mono_type_size (field->type, &dummy_align))
7471 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7472 if (!image_is_dynamic (method->klass->image)) {
7473 field_index = read32 (ip + 2) & 0xffffff;
7474 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7475 data_ptr = mono_image_rva_map (method->klass->image, rva);
7476 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7477 /* for aot code we do the lookup on load */
7478 if (aot && data_ptr)
7479 return GUINT_TO_POINTER (rva);
7481 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7483 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL
 * at IP inside METHOD, including a disassembly of the offending opcode.
 */
7491 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7493 char *method_fname = mono_method_full_name (method, TRUE);
7495 MonoMethodHeader *header = mono_method_get_header (method);
7497 if (header->code_size == 0)
7498 method_code = g_strdup ("method body is empty.");
7500 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7501 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7502 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7503 g_free (method_fname);
7504 g_free (method_code);
7505 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG, registering it as a GC
 * root so it survives until the compile failure is reported.
 */
7509 set_exception_object (MonoCompile *cfg, MonoException *exception)
7511 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7512 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7513 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value into local N. When the value
 * is a just-emitted constant and the store would be a plain reg-reg move,
 * retarget the constant's dreg instead of emitting the move.
 */
7517 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7520 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7521 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7522 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7523 /* Optimize reg-reg moves away */
7525 * Can't optimize other opcodes, since sp[0] might point to
7526 * the last ins of a decomposed opcode.
7528 sp [0]->dreg = (cfg)->locals [n]->dreg;
7530 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7535 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for `ldloca N; initobj <type>`: instead of taking the
 * local's address, directly emit the local's initialization. Returns the
 * new IP past the consumed sequence on success (NULL-equivalent path when
 * the pattern does not match is elided from this view).
 */
7538 static inline unsigned char *
7539 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7549 local = read16 (ip + 2);
7553 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7554 /* From the INITOBJ case */
7555 token = read32 (ip + 2);
7556 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7557 CHECK_TYPELOAD (klass);
7558 type = mini_get_underlying_type (&klass->byval_arg);
7559 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_runtime_constant:
 *
 *   Load a runtime constant identified by PATCH_TYPE into *INS: an AOT
 * constant when compiling AOT, otherwise the patch target is resolved
 * immediately and embedded as a PCONST.
 */
7567 emit_runtime_constant (MonoCompile *cfg, MonoInst **ins, MonoJumpInfoType patch_type)
7569 if (cfg->compile_aot) {
7570 EMIT_NEW_AOTCONST (cfg, *ins, patch_type, NULL);
7575 ji.type = patch_type;
7576 target = mono_resolve_patch_target (NULL, NULL, NULL, &ji, FALSE);
7578 EMIT_NEW_PCONST (cfg, *ins, target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking the parent chain.
 */
7583 is_exception_class (MonoClass *klass)
7586 if (klass == mono_defaults.exception_class)
7588 klass = klass->parent;
7594 * is_jit_optimizer_disabled:
7596 * Determine whenever M's assembly has a DebuggableAttribute with the
7597 * IsJITOptimizerDisabled flag set.
7600 is_jit_optimizer_disabled (MonoMethod *m)
7602 MonoAssembly *ass = m->klass->image->assembly;
7603 MonoCustomAttrInfo* attrs;
7604 static MonoClass *klass;
7606 gboolean val = FALSE;
/* fast path: result is cached per assembly, published with a memory barrier */
7609 if (ass->jit_optimizer_disabled_inited)
7610 return ass->jit_optimizer_disabled;
7613 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7616 ass->jit_optimizer_disabled = FALSE;
7617 mono_memory_barrier ();
7618 ass->jit_optimizer_disabled_inited = TRUE;
7622 attrs = mono_custom_attrs_from_assembly (ass);
7624 for (i = 0; i < attrs->num_attrs; ++i) {
7625 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7627 MonoMethodSignature *sig;
7629 if (!attr->ctor || attr->ctor->klass != klass)
7631 /* Decode the attribute. See reflection.c */
7632 p = (const char*)attr->data;
7633 g_assert (read16 (p) == 0x0001);
7636 // FIXME: Support named parameters
7637 sig = mono_method_signature (attr->ctor);
/* only the DebuggableAttribute (bool, bool) ctor is understood */
7638 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7640 /* Two boolean arguments */
7644 mono_custom_attrs_free (attrs);
7647 ass->jit_optimizer_disabled = val;
7648 mono_memory_barrier ();
7649 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call. Starts from the arch backend's verdict, then
 * vetoes cases where the callee could observe the caller's (dead) stack:
 * byref/pointer/fnptr arguments, valuetype 'this', pinvokes, methods
 * that save an LMF, most wrappers, and any opcode other than CEE_CALL.
 */
7655 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7657 gboolean supported_tail_call;
7660 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7662 for (i = 0; i < fsig->param_count; ++i) {
7663 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7664 /* These can point to the current method's stack */
7665 supported_tail_call = FALSE;
7667 if (fsig->hasthis && cmethod->klass->valuetype)
7668 /* this might point to the current method's stack */
7669 supported_tail_call = FALSE;
7670 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7671 supported_tail_call = FALSE;
7672 if (cfg->method->save_lmf)
7673 supported_tail_call = FALSE;
7674 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7675 supported_tail_call = FALSE;
7676 if (call_opcode != CEE_CALL)
7677 supported_tail_call = FALSE;
7679 /* Debugging support */
7681 if (supported_tail_call) {
7682 if (!mono_debug_count ())
7683 supported_tail_call = FALSE;
7687 return supported_tail_call;
7693 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation for a NEWOBJ. Depending on sharing
 * mode it may pass an rgctx/vtable argument, emit a ctor intrinsic,
 * inline the ctor, do an indirect (calli) invocation for gsharedvt or
 * unpatchable generic calls, or fall back to a direct call.
 * INLINE_COSTS is updated when the ctor is inlined.
 */
7696 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7697 MonoInst **sp, guint8 *ip, int *inline_costs)
7699 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7701 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7702 mono_method_is_generic_sharable (cmethod, TRUE)) {
7703 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7704 mono_class_vtable (cfg->domain, cmethod->klass);
7705 CHECK_TYPELOAD (cmethod->klass);
7707 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7708 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7711 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7712 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7714 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7716 CHECK_TYPELOAD (cmethod->klass);
7717 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7722 /* Avoid virtual calls to ctors if possible */
7723 if (mono_class_is_marshalbyref (cmethod->klass))
7724 callvirt_this_arg = sp [0];
7726 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7727 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7728 CHECK_CFG_EXCEPTION;
7729 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7730 mono_method_check_inlining (cfg, cmethod) &&
7731 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7734 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* the inlined call replaces the 5-byte call instruction */
7735 cfg->real_offset += 5;
7737 *inline_costs += costs - 5;
7739 INLINE_FAILURE ("inline failure");
7740 // FIXME-VT: Clean this up
7741 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7742 GSHAREDVT_FAILURE(*ip);
7743 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7745 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* gsharedvt: call through the out trampoline fetched from the rgctx */
7748 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7749 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7750 } else if (context_used &&
7751 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7752 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7753 MonoInst *cmethod_addr;
7755 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7757 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7758 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7760 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7762 INLINE_FAILURE ("ctor call");
7763 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7764 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that returns VAL from the method currently being compiled.
 * Valuetype returns (mini_type_to_stind () == CEE_STOBJ) are stored either
 * into the return variable cfg->ret or through the caller-provided return
 * buffer address (vret_addr); everything else goes through the arch-specific
 * mono_arch_emit_setret ().
 *
 * NOTE(review): subsampled extract — elided lines (per the embedded original
 * numbering) are left untouched; code tokens are byte-identical.
 */
7771 emit_setret (MonoCompile *cfg, MonoInst *val)
7773 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7776 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden return-buffer argument: store the value into the ret variable. */
7779 if (!cfg->vret_addr) {
7780 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Hidden return-buffer argument: store the valuetype through its address. */
7782 EMIT_NEW_RETLOADA (cfg, ret_addr);
7784 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7785 ins->klass = mono_class_from_mono_type (ret_type);
7788 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: an R4 return value must be converted through the
 * mono_fload_r4_arg icall before reaching the arch setret. */
7789 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7790 MonoInst *iargs [1];
7794 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7795 mono_arch_emit_setret (cfg, cfg->method, conv);
7797 mono_arch_emit_setret (cfg, cfg->method, val);
7800 mono_arch_emit_setret (cfg, cfg->method, val);
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG extended with one extra trailing parameter of
 * native-int type (&mono_defaults.int_class->byval_arg) — presumably the
 * slot used to pass the rgctx as a hidden last argument; confirm against
 * the callers (not visible in this extract).
 *
 *   The copy is allocated with g_malloc (see the FIXME below); ownership /
 * freeing responsibility is not visible from here.
 *
 * NOTE(review): subsampled extract — the loop-index declaration and the
 * final return statement are among the elided lines; code tokens below are
 * byte-identical to the original.
 */
7805 static MonoMethodSignature*
7806 sig_to_rgctx_sig (MonoMethodSignature *sig)
7808 // FIXME: memory allocation
7809 MonoMethodSignature *res;
/* Allocate room for the header plus param_count + 1 parameter pointers. */
7812 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7813 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7814 res->param_count = sig->param_count + 1;
7815 for (i = 0; i < sig->param_count; ++i)
7816 res->params [i] = sig->params [i];
/* Appended hidden parameter: native int (IntPtr). */
7817 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7822 * mono_method_to_ir:
7824 * Translate the .net IL into linear IR.
7827 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7828 MonoInst *return_var, MonoInst **inline_args,
7829 guint inline_offset, gboolean is_virtual_call)
7832 MonoInst *ins, **sp, **stack_start;
7833 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7834 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7835 MonoMethod *cmethod, *method_definition;
7836 MonoInst **arg_array;
7837 MonoMethodHeader *header;
7839 guint32 token, ins_flag;
7841 MonoClass *constrained_class = NULL;
7842 unsigned char *ip, *end, *target, *err_pos;
7843 MonoMethodSignature *sig;
7844 MonoGenericContext *generic_context = NULL;
7845 MonoGenericContainer *generic_container = NULL;
7846 MonoType **param_types;
7847 int i, n, start_new_bblock, dreg;
7848 int num_calls = 0, inline_costs = 0;
7849 int breakpoint_id = 0;
7851 GSList *class_inits = NULL;
7852 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7854 gboolean init_locals, seq_points, skip_dead_blocks;
7855 gboolean sym_seq_points = FALSE;
7856 MonoDebugMethodInfo *minfo;
7857 MonoBitSet *seq_point_locs = NULL;
7858 MonoBitSet *seq_point_set_locs = NULL;
7860 cfg->disable_inline = is_jit_optimizer_disabled (method);
7862 /* serialization and xdomain stuff may need access to private fields and methods */
7863 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7864 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7865 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7866 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7867 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7868 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7870 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7871 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7872 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7873 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7874 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7876 image = method->klass->image;
7877 header = mono_method_get_header (method);
7879 MonoLoaderError *error;
7881 if ((error = mono_loader_get_last_error ())) {
7882 mono_cfg_set_exception (cfg, error->exception_type);
7884 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7885 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7887 goto exception_exit;
7889 generic_container = mono_method_get_generic_container (method);
7890 sig = mono_method_signature (method);
7891 num_args = sig->hasthis + sig->param_count;
7892 ip = (unsigned char*)header->code;
7893 cfg->cil_start = ip;
7894 end = ip + header->code_size;
7895 cfg->stat_cil_code_size += header->code_size;
7897 seq_points = cfg->gen_seq_points && cfg->method == method;
7899 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7900 /* We could hit a seq point before attaching to the JIT (#8338) */
7904 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7905 minfo = mono_debug_lookup_method (method);
7907 MonoSymSeqPoint *sps;
7908 int i, n_il_offsets;
7910 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7911 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7912 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7913 sym_seq_points = TRUE;
7914 for (i = 0; i < n_il_offsets; ++i) {
7915 if (sps [i].il_offset < header->code_size)
7916 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7919 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7920 /* Methods without line number info like auto-generated property accessors */
7921 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7922 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7923 sym_seq_points = TRUE;
7928 * Methods without init_locals set could cause asserts in various passes
7929 * (#497220). To work around this, we emit dummy initialization opcodes
7930 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7931 * on some platforms.
7933 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7934 init_locals = header->init_locals;
7938 method_definition = method;
7939 while (method_definition->is_inflated) {
7940 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7941 method_definition = imethod->declaring;
7944 /* SkipVerification is not allowed if core-clr is enabled */
7945 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7947 dont_verify_stloc = TRUE;
7950 if (sig->is_inflated)
7951 generic_context = mono_method_get_context (method);
7952 else if (generic_container)
7953 generic_context = &generic_container->context;
7954 cfg->generic_context = generic_context;
7957 g_assert (!sig->has_type_parameters);
7959 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7960 g_assert (method->is_inflated);
7961 g_assert (mono_method_get_context (method)->method_inst);
7963 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7964 g_assert (sig->generic_param_count);
7966 if (cfg->method == method) {
7967 cfg->real_offset = 0;
7969 cfg->real_offset = inline_offset;
7972 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7973 cfg->cil_offset_to_bb_len = header->code_size;
7975 cfg->current_method = method;
7977 if (cfg->verbose_level > 2)
7978 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7980 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7982 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7983 for (n = 0; n < sig->param_count; ++n)
7984 param_types [n + sig->hasthis] = sig->params [n];
7985 cfg->arg_types = param_types;
7987 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7988 if (cfg->method == method) {
7990 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7991 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7994 NEW_BBLOCK (cfg, start_bblock);
7995 cfg->bb_entry = start_bblock;
7996 start_bblock->cil_code = NULL;
7997 start_bblock->cil_length = 0;
8000 NEW_BBLOCK (cfg, end_bblock);
8001 cfg->bb_exit = end_bblock;
8002 end_bblock->cil_code = NULL;
8003 end_bblock->cil_length = 0;
8004 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8005 g_assert (cfg->num_bblocks == 2);
8007 arg_array = cfg->args;
8009 if (header->num_clauses) {
8010 cfg->spvars = g_hash_table_new (NULL, NULL);
8011 cfg->exvars = g_hash_table_new (NULL, NULL);
8013 /* handle exception clauses */
8014 for (i = 0; i < header->num_clauses; ++i) {
8015 MonoBasicBlock *try_bb;
8016 MonoExceptionClause *clause = &header->clauses [i];
8017 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8019 try_bb->real_offset = clause->try_offset;
8020 try_bb->try_start = TRUE;
8021 try_bb->region = ((i + 1) << 8) | clause->flags;
8022 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8023 tblock->real_offset = clause->handler_offset;
8024 tblock->flags |= BB_EXCEPTION_HANDLER;
8027 * Linking the try block with the EH block hinders inlining as we won't be able to
8028 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8030 if (COMPILE_LLVM (cfg))
8031 link_bblock (cfg, try_bb, tblock);
8033 if (*(ip + clause->handler_offset) == CEE_POP)
8034 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8036 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8037 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8038 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8039 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8040 MONO_ADD_INS (tblock, ins);
8042 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8043 /* finally clauses already have a seq point */
8044 /* seq points for filter clauses are emitted below */
8045 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8046 MONO_ADD_INS (tblock, ins);
8049 /* todo: is a fault block unsafe to optimize? */
8050 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8051 tblock->flags |= BB_EXCEPTION_UNSAFE;
8054 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8056 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8058 /* catch and filter blocks get the exception object on the stack */
8059 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8060 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8062 /* mostly like handle_stack_args (), but just sets the input args */
8063 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8064 tblock->in_scount = 1;
8065 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8066 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8070 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8071 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8072 if (!cfg->compile_llvm) {
8073 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8074 ins->dreg = tblock->in_stack [0]->dreg;
8075 MONO_ADD_INS (tblock, ins);
8078 MonoInst *dummy_use;
8081 * Add a dummy use for the exvar so its liveness info will be
8084 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8087 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8088 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8089 MONO_ADD_INS (tblock, ins);
8092 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8093 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8094 tblock->flags |= BB_EXCEPTION_HANDLER;
8095 tblock->real_offset = clause->data.filter_offset;
8096 tblock->in_scount = 1;
8097 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8098 /* The filter block shares the exvar with the handler block */
8099 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8100 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8101 MONO_ADD_INS (tblock, ins);
8105 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8106 clause->data.catch_class &&
8108 mono_class_check_context_used (clause->data.catch_class)) {
8110 * In shared generic code with catch
8111 * clauses containing type variables
8112 * the exception handling code has to
8113 * be able to get to the rgctx.
8114 * Therefore we have to make sure that
8115 * the vtable/mrgctx argument (for
8116 * static or generic methods) or the
8117 * "this" argument (for non-static
8118 * methods) are live.
8120 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8121 mini_method_get_context (method)->method_inst ||
8122 method->klass->valuetype) {
8123 mono_get_vtable_var (cfg);
8125 MonoInst *dummy_use;
8127 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8132 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8133 cfg->cbb = start_bblock;
8134 cfg->args = arg_array;
8135 mono_save_args (cfg, sig, inline_args);
8138 /* FIRST CODE BLOCK */
8139 NEW_BBLOCK (cfg, tblock);
8140 tblock->cil_code = ip;
8144 ADD_BBLOCK (cfg, tblock);
8146 if (cfg->method == method) {
8147 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8148 if (breakpoint_id) {
8149 MONO_INST_NEW (cfg, ins, OP_BREAK);
8150 MONO_ADD_INS (cfg->cbb, ins);
8154 /* we use a separate basic block for the initialization code */
8155 NEW_BBLOCK (cfg, init_localsbb);
8156 cfg->bb_init = init_localsbb;
8157 init_localsbb->real_offset = cfg->real_offset;
8158 start_bblock->next_bb = init_localsbb;
8159 init_localsbb->next_bb = cfg->cbb;
8160 link_bblock (cfg, start_bblock, init_localsbb);
8161 link_bblock (cfg, init_localsbb, cfg->cbb);
8163 cfg->cbb = init_localsbb;
8165 if (cfg->gsharedvt && cfg->method == method) {
8166 MonoGSharedVtMethodInfo *info;
8167 MonoInst *var, *locals_var;
8170 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8171 info->method = cfg->method;
8172 info->count_entries = 16;
8173 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8174 cfg->gsharedvt_info = info;
8176 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8177 /* prevent it from being register allocated */
8178 //var->flags |= MONO_INST_VOLATILE;
8179 cfg->gsharedvt_info_var = var;
8181 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8182 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8184 /* Allocate locals */
8185 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8186 /* prevent it from being register allocated */
8187 //locals_var->flags |= MONO_INST_VOLATILE;
8188 cfg->gsharedvt_locals_var = locals_var;
8190 dreg = alloc_ireg (cfg);
8191 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8193 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8194 ins->dreg = locals_var->dreg;
8196 MONO_ADD_INS (cfg->cbb, ins);
8197 cfg->gsharedvt_locals_var_ins = ins;
8199 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8202 ins->flags |= MONO_INST_INIT;
8206 if (mono_security_core_clr_enabled ()) {
8207 /* check if this is native code, e.g. an icall or a p/invoke */
8208 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8209 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8211 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8212 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8214 /* if this ia a native call then it can only be JITted from platform code */
8215 if ((icall || pinvk) && method->klass && method->klass->image) {
8216 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8217 MonoException *ex = icall ? mono_get_exception_security () :
8218 mono_get_exception_method_access ();
8219 emit_throw_exception (cfg, ex);
8226 CHECK_CFG_EXCEPTION;
8228 if (header->code_size == 0)
8231 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8236 if (cfg->method == method)
8237 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8239 for (n = 0; n < header->num_locals; ++n) {
8240 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8245 /* We force the vtable variable here for all shared methods
8246 for the possibility that they might show up in a stack
8247 trace where their exact instantiation is needed. */
8248 if (cfg->gshared && method == cfg->method) {
8249 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8250 mini_method_get_context (method)->method_inst ||
8251 method->klass->valuetype) {
8252 mono_get_vtable_var (cfg);
8254 /* FIXME: Is there a better way to do this?
8255 We need the variable live for the duration
8256 of the whole method. */
8257 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8261 /* add a check for this != NULL to inlined methods */
8262 if (is_virtual_call) {
8265 NEW_ARGLOAD (cfg, arg_ins, 0);
8266 MONO_ADD_INS (cfg->cbb, arg_ins);
8267 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8270 skip_dead_blocks = !dont_verify;
8271 if (skip_dead_blocks) {
8272 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8277 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8278 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8281 start_new_bblock = 0;
8283 if (cfg->method == method)
8284 cfg->real_offset = ip - header->code;
8286 cfg->real_offset = inline_offset;
8291 if (start_new_bblock) {
8292 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8293 if (start_new_bblock == 2) {
8294 g_assert (ip == tblock->cil_code);
8296 GET_BBLOCK (cfg, tblock, ip);
8298 cfg->cbb->next_bb = tblock;
8300 start_new_bblock = 0;
8301 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8302 if (cfg->verbose_level > 3)
8303 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8304 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8308 g_slist_free (class_inits);
8311 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8312 link_bblock (cfg, cfg->cbb, tblock);
8313 if (sp != stack_start) {
8314 handle_stack_args (cfg, stack_start, sp - stack_start);
8316 CHECK_UNVERIFIABLE (cfg);
8318 cfg->cbb->next_bb = tblock;
8320 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8321 if (cfg->verbose_level > 3)
8322 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8323 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8326 g_slist_free (class_inits);
8331 if (skip_dead_blocks) {
8332 int ip_offset = ip - header->code;
8334 if (ip_offset == bb->end)
8338 int op_size = mono_opcode_size (ip, end);
8339 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8341 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8343 if (ip_offset + op_size == bb->end) {
8344 MONO_INST_NEW (cfg, ins, OP_NOP);
8345 MONO_ADD_INS (cfg->cbb, ins);
8346 start_new_bblock = 1;
8354 * Sequence points are points where the debugger can place a breakpoint.
8355 * Currently, we generate these automatically at points where the IL
8358 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8360 * Make methods interruptable at the beginning, and at the targets of
8361 * backward branches.
8362 * Also, do this at the start of every bblock in methods with clauses too,
8363 * to be able to handle instructions with inprecise control flow like
8365 * Backward branches are handled at the end of method-to-ir ().
8367 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8368 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8370 /* Avoid sequence points on empty IL like .volatile */
8371 // FIXME: Enable this
8372 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8373 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8374 if ((sp != stack_start) && !sym_seq_point)
8375 ins->flags |= MONO_INST_NONEMPTY_STACK;
8376 MONO_ADD_INS (cfg->cbb, ins);
8379 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8382 cfg->cbb->real_offset = cfg->real_offset;
8384 if ((cfg->method == method) && cfg->coverage_info) {
8385 guint32 cil_offset = ip - header->code;
8386 cfg->coverage_info->data [cil_offset].cil_code = ip;
8388 /* TODO: Use an increment here */
8389 #if defined(TARGET_X86)
8390 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8391 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8393 MONO_ADD_INS (cfg->cbb, ins);
8395 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8396 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8400 if (cfg->verbose_level > 3)
8401 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8405 if (seq_points && !sym_seq_points && sp != stack_start) {
8407 * The C# compiler uses these nops to notify the JIT that it should
8408 * insert seq points.
8410 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8411 MONO_ADD_INS (cfg->cbb, ins);
8413 if (cfg->keep_cil_nops)
8414 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8416 MONO_INST_NEW (cfg, ins, OP_NOP);
8418 MONO_ADD_INS (cfg->cbb, ins);
8421 if (should_insert_brekpoint (cfg->method)) {
8422 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8424 MONO_INST_NEW (cfg, ins, OP_NOP);
8427 MONO_ADD_INS (cfg->cbb, ins);
8433 CHECK_STACK_OVF (1);
8434 n = (*ip)-CEE_LDARG_0;
8436 EMIT_NEW_ARGLOAD (cfg, ins, n);
8444 CHECK_STACK_OVF (1);
8445 n = (*ip)-CEE_LDLOC_0;
8447 EMIT_NEW_LOCLOAD (cfg, ins, n);
8456 n = (*ip)-CEE_STLOC_0;
8459 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8461 emit_stloc_ir (cfg, sp, header, n);
8468 CHECK_STACK_OVF (1);
8471 EMIT_NEW_ARGLOAD (cfg, ins, n);
8477 CHECK_STACK_OVF (1);
8480 NEW_ARGLOADA (cfg, ins, n);
8481 MONO_ADD_INS (cfg->cbb, ins);
8491 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8493 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8498 CHECK_STACK_OVF (1);
8501 EMIT_NEW_LOCLOAD (cfg, ins, n);
8505 case CEE_LDLOCA_S: {
8506 unsigned char *tmp_ip;
8508 CHECK_STACK_OVF (1);
8509 CHECK_LOCAL (ip [1]);
8511 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8517 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8526 CHECK_LOCAL (ip [1]);
8527 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8529 emit_stloc_ir (cfg, sp, header, ip [1]);
8534 CHECK_STACK_OVF (1);
8535 EMIT_NEW_PCONST (cfg, ins, NULL);
8536 ins->type = STACK_OBJ;
8541 CHECK_STACK_OVF (1);
8542 EMIT_NEW_ICONST (cfg, ins, -1);
8555 CHECK_STACK_OVF (1);
8556 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8562 CHECK_STACK_OVF (1);
8564 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8570 CHECK_STACK_OVF (1);
8571 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8577 CHECK_STACK_OVF (1);
8578 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8579 ins->type = STACK_I8;
8580 ins->dreg = alloc_dreg (cfg, STACK_I8);
8582 ins->inst_l = (gint64)read64 (ip);
8583 MONO_ADD_INS (cfg->cbb, ins);
8589 gboolean use_aotconst = FALSE;
8591 #ifdef TARGET_POWERPC
8592 /* FIXME: Clean this up */
8593 if (cfg->compile_aot)
8594 use_aotconst = TRUE;
8597 /* FIXME: we should really allocate this only late in the compilation process */
8598 f = mono_domain_alloc (cfg->domain, sizeof (float));
8600 CHECK_STACK_OVF (1);
8606 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8608 dreg = alloc_freg (cfg);
8609 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8610 ins->type = cfg->r4_stack_type;
8612 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8613 ins->type = cfg->r4_stack_type;
8614 ins->dreg = alloc_dreg (cfg, STACK_R8);
8616 MONO_ADD_INS (cfg->cbb, ins);
8626 gboolean use_aotconst = FALSE;
8628 #ifdef TARGET_POWERPC
8629 /* FIXME: Clean this up */
8630 if (cfg->compile_aot)
8631 use_aotconst = TRUE;
8634 /* FIXME: we should really allocate this only late in the compilation process */
8635 d = mono_domain_alloc (cfg->domain, sizeof (double));
8637 CHECK_STACK_OVF (1);
8643 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8645 dreg = alloc_freg (cfg);
8646 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8647 ins->type = STACK_R8;
8649 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8650 ins->type = STACK_R8;
8651 ins->dreg = alloc_dreg (cfg, STACK_R8);
8653 MONO_ADD_INS (cfg->cbb, ins);
8662 MonoInst *temp, *store;
8664 CHECK_STACK_OVF (1);
8668 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8669 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8671 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8674 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8687 if (sp [0]->type == STACK_R8)
8688 /* we need to pop the value from the x86 FP stack */
8689 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8694 MonoMethodSignature *fsig;
8697 INLINE_FAILURE ("jmp");
8698 GSHAREDVT_FAILURE (*ip);
8701 if (stack_start != sp)
8703 token = read32 (ip + 1);
8704 /* FIXME: check the signature matches */
8705 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8707 if (!cmethod || mono_loader_get_last_error ())
8710 if (cfg->gshared && mono_method_check_context_used (cmethod))
8711 GENERIC_SHARING_FAILURE (CEE_JMP);
8713 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8715 fsig = mono_method_signature (cmethod);
8716 n = fsig->param_count + fsig->hasthis;
8717 if (cfg->llvm_only) {
8720 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8721 for (i = 0; i < n; ++i)
8722 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8723 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8725 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8726 * have to emit a normal return since llvm expects it.
8729 emit_setret (cfg, ins);
8730 MONO_INST_NEW (cfg, ins, OP_BR);
8731 ins->inst_target_bb = end_bblock;
8732 MONO_ADD_INS (cfg->cbb, ins);
8733 link_bblock (cfg, cfg->cbb, end_bblock);
8736 } else if (cfg->backend->have_op_tail_call) {
8737 /* Handle tail calls similarly to calls */
8740 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8741 call->method = cmethod;
8742 call->tail_call = TRUE;
8743 call->signature = mono_method_signature (cmethod);
8744 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8745 call->inst.inst_p0 = cmethod;
8746 for (i = 0; i < n; ++i)
8747 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8749 mono_arch_emit_call (cfg, call);
8750 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8751 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8753 for (i = 0; i < num_args; ++i)
8754 /* Prevent arguments from being optimized away */
8755 arg_array [i]->flags |= MONO_INST_VOLATILE;
8757 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8758 ins = (MonoInst*)call;
8759 ins->inst_p0 = cmethod;
8760 MONO_ADD_INS (cfg->cbb, ins);
8764 start_new_bblock = 1;
8769 MonoMethodSignature *fsig;
8772 token = read32 (ip + 1);
8776 //GSHAREDVT_FAILURE (*ip);
8781 fsig = mini_get_signature (method, token, generic_context);
8783 if (method->dynamic && fsig->pinvoke) {
8787 * This is a call through a function pointer using a pinvoke
8788 * signature. Have to create a wrapper and call that instead.
8789 * FIXME: This is very slow, need to create a wrapper at JIT time
8790 * instead based on the signature.
8792 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8793 EMIT_NEW_PCONST (cfg, args [1], fsig);
8795 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8798 n = fsig->param_count + fsig->hasthis;
8802 //g_assert (!virtual || fsig->hasthis);
8806 inline_costs += 10 * num_calls++;
8809 * Making generic calls out of gsharedvt methods.
8810 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8811 * patching gshared method addresses into a gsharedvt method.
8813 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8815 * We pass the address to the gsharedvt trampoline in the rgctx reg
8817 MonoInst *callee = addr;
8819 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8821 GSHAREDVT_FAILURE (*ip);
8823 addr = emit_get_rgctx_sig (cfg, context_used,
8824 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8825 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8829 /* Prevent inlining of methods with indirect calls */
8830 INLINE_FAILURE ("indirect call");
8832 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8837 * Instead of emitting an indirect call, emit a direct call
8838 * with the contents of the aotconst as the patch info.
8840 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8841 info_type = addr->inst_c1;
8842 info_data = addr->inst_p0;
8844 info_type = addr->inst_right->inst_c1;
8845 info_data = addr->inst_right->inst_left;
8848 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8849 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8854 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8858 /* End of call, INS should contain the result of the call, if any */
8860 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8862 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8865 CHECK_CFG_EXCEPTION;
8869 constrained_class = NULL;
8873 case CEE_CALLVIRT: {
8874 MonoInst *addr = NULL;
8875 MonoMethodSignature *fsig = NULL;
8877 int virtual = *ip == CEE_CALLVIRT;
8878 gboolean pass_imt_from_rgctx = FALSE;
8879 MonoInst *imt_arg = NULL;
8880 MonoInst *keep_this_alive = NULL;
8881 gboolean pass_vtable = FALSE;
8882 gboolean pass_mrgctx = FALSE;
8883 MonoInst *vtable_arg = NULL;
8884 gboolean check_this = FALSE;
8885 gboolean supported_tail_call = FALSE;
8886 gboolean tail_call = FALSE;
8887 gboolean need_seq_point = FALSE;
8888 guint32 call_opcode = *ip;
8889 gboolean emit_widen = TRUE;
8890 gboolean push_res = TRUE;
8891 gboolean skip_ret = FALSE;
8892 gboolean delegate_invoke = FALSE;
8893 gboolean direct_icall = FALSE;
8894 gboolean constrained_partial_call = FALSE;
8895 MonoMethod *cil_method;
8898 token = read32 (ip + 1);
8902 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8903 cil_method = cmethod;
8905 if (constrained_class) {
8906 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8907 if (!mini_is_gsharedvt_klass (constrained_class)) {
8908 g_assert (!cmethod->klass->valuetype);
8909 if (!mini_type_is_reference (&constrained_class->byval_arg))
8910 constrained_partial_call = TRUE;
8914 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8915 if (cfg->verbose_level > 2)
8916 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8917 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8918 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8920 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8924 if (cfg->verbose_level > 2)
8925 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8927 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8929 * This is needed since get_method_constrained can't find
8930 * the method in klass representing a type var.
8931 * The type var is guaranteed to be a reference type in this
8934 if (!mini_is_gsharedvt_klass (constrained_class))
8935 g_assert (!cmethod->klass->valuetype);
8937 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8943 if (!cmethod || mono_loader_get_last_error ())
8945 if (!dont_verify && !cfg->skip_visibility) {
8946 MonoMethod *target_method = cil_method;
8947 if (method->is_inflated) {
8948 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8950 if (!mono_method_can_access_method (method_definition, target_method) &&
8951 !mono_method_can_access_method (method, cil_method))
8952 METHOD_ACCESS_FAILURE (method, cil_method);
8955 if (mono_security_core_clr_enabled ())
8956 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8958 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8959 /* MS.NET seems to silently convert this to a callvirt */
8964 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8965 * converts to a callvirt.
8967 * tests/bug-515884.il is an example of this behavior
8969 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8970 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8971 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8975 if (!cmethod->klass->inited)
8976 if (!mono_class_init (cmethod->klass))
8977 TYPE_LOAD_ERROR (cmethod->klass);
8979 fsig = mono_method_signature (cmethod);
8982 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8983 mini_class_is_system_array (cmethod->klass)) {
8984 array_rank = cmethod->klass->rank;
8985 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8986 direct_icall = TRUE;
8987 } else if (fsig->pinvoke) {
8988 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8989 fsig = mono_method_signature (wrapper);
8990 } else if (constrained_class) {
8992 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8996 /* See code below */
8997 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8998 MonoBasicBlock *tbb;
9000 GET_BBLOCK (cfg, tbb, ip + 5);
9001 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9003 * We want to extend the try block to cover the call, but we can't do it if the
9004 * call is made directly since it's followed by an exception check.
9006 direct_icall = FALSE;
9010 mono_save_token_info (cfg, image, token, cil_method);
9012 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9013 need_seq_point = TRUE;
9015 /* Don't support calls made using type arguments for now */
9017 if (cfg->gsharedvt) {
9018 if (mini_is_gsharedvt_signature (fsig))
9019 GSHAREDVT_FAILURE (*ip);
9023 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9024 g_assert_not_reached ();
9026 n = fsig->param_count + fsig->hasthis;
9028 if (!cfg->gshared && cmethod->klass->generic_container)
9032 g_assert (!mono_method_check_context_used (cmethod));
9036 //g_assert (!virtual || fsig->hasthis);
9040 if (constrained_class) {
9041 if (mini_is_gsharedvt_klass (constrained_class)) {
9042 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9043 /* The 'Own method' case below */
9044 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9045 /* 'The type parameter is instantiated as a reference type' case below. */
9047 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9048 CHECK_CFG_EXCEPTION;
9055 * We have the `constrained.' prefix opcode.
9057 if (constrained_partial_call) {
9058 gboolean need_box = TRUE;
9061 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9062 * called method is not known at compile time either. The called method could end up being
9063 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9064 * to box the receiver.
9065 * A simple solution would be to box always and make a normal virtual call, but that would
9066 * be bad performance wise.
9068 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9070 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9075 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9076 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9077 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9078 ins->klass = constrained_class;
9079 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9080 CHECK_CFG_EXCEPTION;
9081 } else if (need_box) {
9083 MonoBasicBlock *is_ref_bb, *end_bb;
9084 MonoInst *nonbox_call;
9087 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9089 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9090 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9092 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9094 NEW_BBLOCK (cfg, is_ref_bb);
9095 NEW_BBLOCK (cfg, end_bb);
9097 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9102 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9107 MONO_START_BB (cfg, is_ref_bb);
9108 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9109 ins->klass = constrained_class;
9110 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9111 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9115 MONO_START_BB (cfg, end_bb);
9118 nonbox_call->dreg = ins->dreg;
9121 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9122 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9123 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9126 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9128 * The type parameter is instantiated as a valuetype,
9129 * but that type doesn't override the method we're
9130 * calling, so we need to box `this'.
9132 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9133 ins->klass = constrained_class;
9134 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9135 CHECK_CFG_EXCEPTION;
9136 } else if (!constrained_class->valuetype) {
9137 int dreg = alloc_ireg_ref (cfg);
9140 * The type parameter is instantiated as a reference
9141 * type. We have a managed pointer on the stack, so
9142 * we need to dereference it here.
9144 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9145 ins->type = STACK_OBJ;
9148 if (cmethod->klass->valuetype) {
9151 /* Interface method */
9154 mono_class_setup_vtable (constrained_class);
9155 CHECK_TYPELOAD (constrained_class);
9156 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9158 TYPE_LOAD_ERROR (constrained_class);
9159 slot = mono_method_get_vtable_slot (cmethod);
9161 TYPE_LOAD_ERROR (cmethod->klass);
9162 cmethod = constrained_class->vtable [ioffset + slot];
9164 if (cmethod->klass == mono_defaults.enum_class) {
9165 /* Enum implements some interfaces, so treat this as the first case */
9166 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9167 ins->klass = constrained_class;
9168 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9169 CHECK_CFG_EXCEPTION;
9174 constrained_class = NULL;
9177 if (check_call_signature (cfg, fsig, sp))
9180 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9181 delegate_invoke = TRUE;
9183 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9184 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9185 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9193 * If the callee is a shared method, then its static cctor
9194 * might not get called after the call was patched.
9196 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9197 emit_class_init (cfg, cmethod->klass);
9198 CHECK_TYPELOAD (cmethod->klass);
9201 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9204 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9206 context_used = mini_method_check_context_used (cfg, cmethod);
9208 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9209 /* Generic method interface
9210 calls are resolved via a
9211 helper function and don't
9213 if (!cmethod_context || !cmethod_context->method_inst)
9214 pass_imt_from_rgctx = TRUE;
9218 * If a shared method calls another
9219 * shared method then the caller must
9220 * have a generic sharing context
9221 * because the magic trampoline
9222 * requires it. FIXME: We shouldn't
9223 * have to force the vtable/mrgctx
9224 * variable here. Instead there
9225 * should be a flag in the cfg to
9226 * request a generic sharing context.
9229 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9230 mono_get_vtable_var (cfg);
9235 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9237 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9239 CHECK_TYPELOAD (cmethod->klass);
9240 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9245 g_assert (!vtable_arg);
9247 if (!cfg->compile_aot) {
9249 * emit_get_rgctx_method () calls mono_class_vtable () so check
9250 * for type load errors before.
9252 mono_class_setup_vtable (cmethod->klass);
9253 CHECK_TYPELOAD (cmethod->klass);
9256 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9258 /* !marshalbyref is needed to properly handle generic methods + remoting */
9259 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9260 MONO_METHOD_IS_FINAL (cmethod)) &&
9261 !mono_class_is_marshalbyref (cmethod->klass)) {
9268 if (pass_imt_from_rgctx) {
9269 g_assert (!pass_vtable);
9271 imt_arg = emit_get_rgctx_method (cfg, context_used,
9272 cmethod, MONO_RGCTX_INFO_METHOD);
9276 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9278 /* Calling virtual generic methods */
9279 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9280 !(MONO_METHOD_IS_FINAL (cmethod) &&
9281 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9282 fsig->generic_param_count &&
9283 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9285 MonoInst *this_temp, *this_arg_temp, *store;
9286 MonoInst *iargs [4];
9288 g_assert (fsig->is_inflated);
9290 /* Prevent inlining of methods that contain indirect calls */
9291 INLINE_FAILURE ("virtual generic call");
9293 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9294 GSHAREDVT_FAILURE (*ip);
9296 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9297 g_assert (!imt_arg);
9299 g_assert (cmethod->is_inflated);
9300 imt_arg = emit_get_rgctx_method (cfg, context_used,
9301 cmethod, MONO_RGCTX_INFO_METHOD);
9302 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9304 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9305 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9306 MONO_ADD_INS (cfg->cbb, store);
9308 /* FIXME: This should be a managed pointer */
9309 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9311 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9312 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9313 cmethod, MONO_RGCTX_INFO_METHOD);
9314 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9315 addr = mono_emit_jit_icall (cfg,
9316 mono_helper_compile_generic_method, iargs);
9318 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9320 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9327 * Implement a workaround for the inherent races involved in locking:
9333 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9334 * try block, the Exit () won't be executed, see:
9335 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9336 * To work around this, we extend such try blocks to include the last x bytes
9337 * of the Monitor.Enter () call.
9339 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9340 MonoBasicBlock *tbb;
9342 GET_BBLOCK (cfg, tbb, ip + 5);
9344 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9345 * from Monitor.Enter like ArgumentNullException.
9347 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9348 /* Mark this bblock as needing to be extended */
9349 tbb->extend_try_block = TRUE;
9353 /* Conversion to a JIT intrinsic */
9354 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9355 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9356 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9363 if ((cfg->opt & MONO_OPT_INLINE) &&
9364 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9365 mono_method_check_inlining (cfg, cmethod)) {
9367 gboolean always = FALSE;
9369 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9370 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9371 /* Prevent inlining of methods that call wrappers */
9372 INLINE_FAILURE ("wrapper call");
9373 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9377 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9379 cfg->real_offset += 5;
9381 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9382 /* *sp is already set by inline_method */
9387 inline_costs += costs;
9393 /* Tail recursion elimination */
9394 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9395 gboolean has_vtargs = FALSE;
9398 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9399 INLINE_FAILURE ("tail call");
9401 /* keep it simple */
9402 for (i = fsig->param_count - 1; i >= 0; i--) {
9403 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9408 for (i = 0; i < n; ++i)
9409 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9410 MONO_INST_NEW (cfg, ins, OP_BR);
9411 MONO_ADD_INS (cfg->cbb, ins);
9412 tblock = start_bblock->out_bb [0];
9413 link_bblock (cfg, cfg->cbb, tblock);
9414 ins->inst_target_bb = tblock;
9415 start_new_bblock = 1;
9417 /* skip the CEE_RET, too */
9418 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9425 inline_costs += 10 * num_calls++;
9428 * Making generic calls out of gsharedvt methods.
9429 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9430 * patching gshared method addresses into a gsharedvt method.
9432 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9433 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9434 MonoRgctxInfoType info_type;
9437 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9438 //GSHAREDVT_FAILURE (*ip);
9439 // disable for possible remoting calls
9440 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9441 GSHAREDVT_FAILURE (*ip);
9442 if (fsig->generic_param_count) {
9443 /* virtual generic call */
9444 g_assert (!imt_arg);
9445 /* Same as the virtual generic case above */
9446 imt_arg = emit_get_rgctx_method (cfg, context_used,
9447 cmethod, MONO_RGCTX_INFO_METHOD);
9448 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9450 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9451 /* This can happen when we call a fully instantiated iface method */
9452 imt_arg = emit_get_rgctx_method (cfg, context_used,
9453 cmethod, MONO_RGCTX_INFO_METHOD);
9458 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9459 keep_this_alive = sp [0];
9461 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9462 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9464 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9465 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9467 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9471 /* Generic sharing */
9474 * Use this if the callee is gsharedvt sharable too, since
9475 * at runtime we might find an instantiation so the call cannot
9476 * be patched (the 'no_patch' code path in mini-trampolines.c).
9478 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9479 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9480 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9481 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9482 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9483 INLINE_FAILURE ("gshared");
9485 g_assert (cfg->gshared && cmethod);
9489 * We are compiling a call to a
9490 * generic method from shared code,
9491 * which means that we have to look up
9492 * the method in the rgctx and do an
9496 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9498 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9499 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9503 /* Direct calls to icalls */
9505 MonoMethod *wrapper;
9508 /* Inline the wrapper */
9509 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9511 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9512 g_assert (costs > 0);
9513 cfg->real_offset += 5;
9515 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9516 /* *sp is already set by inline_method */
9521 inline_costs += costs;
9530 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9531 MonoInst *val = sp [fsig->param_count];
9533 if (val->type == STACK_OBJ) {
9534 MonoInst *iargs [2];
9539 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9542 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9543 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9544 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9545 emit_write_barrier (cfg, addr, val);
9546 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9547 GSHAREDVT_FAILURE (*ip);
9548 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9549 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9551 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9552 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9553 if (!cmethod->klass->element_class->valuetype && !readonly)
9554 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9555 CHECK_TYPELOAD (cmethod->klass);
9558 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9561 g_assert_not_reached ();
9568 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9572 /* Tail prefix / tail call optimization */
9574 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9575 /* FIXME: runtime generic context pointer for jumps? */
9576 /* FIXME: handle this for generic sharing eventually */
9577 if ((ins_flag & MONO_INST_TAILCALL) &&
9578 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9579 supported_tail_call = TRUE;
9581 if (supported_tail_call) {
9584 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9585 INLINE_FAILURE ("tail call");
9587 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9589 if (cfg->backend->have_op_tail_call) {
9590 /* Handle tail calls similarly to normal calls */
9593 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9595 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9596 call->tail_call = TRUE;
9597 call->method = cmethod;
9598 call->signature = mono_method_signature (cmethod);
9601 * We implement tail calls by storing the actual arguments into the
9602 * argument variables, then emitting a CEE_JMP.
9604 for (i = 0; i < n; ++i) {
9605 /* Prevent argument from being register allocated */
9606 arg_array [i]->flags |= MONO_INST_VOLATILE;
9607 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9609 ins = (MonoInst*)call;
9610 ins->inst_p0 = cmethod;
9611 ins->inst_p1 = arg_array [0];
9612 MONO_ADD_INS (cfg->cbb, ins);
9613 link_bblock (cfg, cfg->cbb, end_bblock);
9614 start_new_bblock = 1;
9616 // FIXME: Eliminate unreachable epilogs
9619 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9620 * only reachable from this call.
9622 GET_BBLOCK (cfg, tblock, ip + 5);
9623 if (tblock == cfg->cbb || tblock->in_count == 0)
9632 * Synchronized wrappers.
9633 * It's hard to determine where to replace a method with its synchronized
9634 * wrapper without causing an infinite recursion. The current solution is
9635 * to add the synchronized wrapper in the trampolines, and to
9636 * change the called method to a dummy wrapper, and resolve that wrapper
9637 * to the real method in mono_jit_compile_method ().
9639 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9640 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9641 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9642 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9646 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9647 * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
9648 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9649 * based on whether there is an rgctx or not.
9651 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9652 MonoInst *args_buf [16], *icall_args [16];
9654 MonoBasicBlock *rgctx_bb, *end_bb;
9655 MonoInst *call1, *call2, *call_target;
9656 MonoMethodSignature *rgctx_sig;
9657 int rgctx_reg, tmp_reg;
9659 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9661 NEW_BBLOCK (cfg, rgctx_bb);
9662 NEW_BBLOCK (cfg, end_bb);
9664 // FIXME: Optimize this
9666 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9668 icall_args [0] = sp [0];
9669 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9671 icall_args [2] = imt_arg;
9673 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9674 icall_args [2] = ins;
9677 rgctx_reg = alloc_preg (cfg);
9678 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9679 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9680 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9682 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9684 // FIXME: Only do this if needed (generic calls)
9686 // Check whether to pass an rgctx
9687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9689 /* Non rgctx case */
9690 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9693 MONO_START_BB (cfg, rgctx_bb);
9694 /* Make a call with an rgctx */
9695 if (fsig->param_count + 2 < 16)
9698 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9700 for (i = 0; i < fsig->param_count; ++i)
9701 args [i + 1] = sp [i + 1];
9702 tmp_reg = alloc_preg (cfg);
9703 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9704 rgctx_sig = sig_to_rgctx_sig (fsig);
9705 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9706 call2->dreg = call1->dreg;
9707 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9709 MONO_START_BB (cfg, end_bb);
9715 INLINE_FAILURE ("call");
9716 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9717 imt_arg, vtable_arg);
9719 if (tail_call && !cfg->llvm_only) {
9720 link_bblock (cfg, cfg->cbb, end_bblock);
9721 start_new_bblock = 1;
9723 // FIXME: Eliminate unreachable epilogs
9726 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9727 * only reachable from this call.
9729 GET_BBLOCK (cfg, tblock, ip + 5);
9730 if (tblock == cfg->cbb || tblock->in_count == 0)
9737 /* End of call, INS should contain the result of the call, if any */
9739 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9742 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9747 if (keep_this_alive) {
9748 MonoInst *dummy_use;
9750 /* See mono_emit_method_call_full () */
9751 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9754 CHECK_CFG_EXCEPTION;
9758 g_assert (*ip == CEE_RET);
9762 constrained_class = NULL;
9764 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9768 if (cfg->method != method) {
9769 /* return from inlined method */
9771 * If in_count == 0, that means the ret is unreachable due to
9772 * being preceded by a throw. In that case, inline_method () will
9773 * handle setting the return value
9774 * (test case: test_0_inline_throw ()).
9776 if (return_var && cfg->cbb->in_count) {
9777 MonoType *ret_type = mono_method_signature (method)->ret;
9783 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9786 //g_assert (returnvar != -1);
9787 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9788 cfg->ret_var_set = TRUE;
9791 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9793 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9797 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9799 if (seq_points && !sym_seq_points) {
9801 * Place a seq point here too even through the IL stack is not
9802 * empty, so a step over on
9805 * will work correctly.
9807 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9808 MONO_ADD_INS (cfg->cbb, ins);
9811 g_assert (!return_var);
9815 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9818 emit_setret (cfg, *sp);
9821 if (sp != stack_start)
9823 MONO_INST_NEW (cfg, ins, OP_BR);
9825 ins->inst_target_bb = end_bblock;
9826 MONO_ADD_INS (cfg->cbb, ins);
9827 link_bblock (cfg, cfg->cbb, end_bblock);
9828 start_new_bblock = 1;
9832 MONO_INST_NEW (cfg, ins, OP_BR);
9834 target = ip + 1 + (signed char)(*ip);
9836 GET_BBLOCK (cfg, tblock, target);
9837 link_bblock (cfg, cfg->cbb, tblock);
9838 ins->inst_target_bb = tblock;
9839 if (sp != stack_start) {
9840 handle_stack_args (cfg, stack_start, sp - stack_start);
9842 CHECK_UNVERIFIABLE (cfg);
9844 MONO_ADD_INS (cfg->cbb, ins);
9845 start_new_bblock = 1;
9846 inline_costs += BRANCH_COST;
9860 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9862 target = ip + 1 + *(signed char*)ip;
9868 inline_costs += BRANCH_COST;
9872 MONO_INST_NEW (cfg, ins, OP_BR);
9875 target = ip + 4 + (gint32)read32(ip);
9877 GET_BBLOCK (cfg, tblock, target);
9878 link_bblock (cfg, cfg->cbb, tblock);
9879 ins->inst_target_bb = tblock;
9880 if (sp != stack_start) {
9881 handle_stack_args (cfg, stack_start, sp - stack_start);
9883 CHECK_UNVERIFIABLE (cfg);
9886 MONO_ADD_INS (cfg->cbb, ins);
9888 start_new_bblock = 1;
9889 inline_costs += BRANCH_COST;
9896 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9897 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9898 guint32 opsize = is_short ? 1 : 4;
9900 CHECK_OPSIZE (opsize);
9902 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9905 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9910 GET_BBLOCK (cfg, tblock, target);
9911 link_bblock (cfg, cfg->cbb, tblock);
9912 GET_BBLOCK (cfg, tblock, ip);
9913 link_bblock (cfg, cfg->cbb, tblock);
9915 if (sp != stack_start) {
9916 handle_stack_args (cfg, stack_start, sp - stack_start);
9917 CHECK_UNVERIFIABLE (cfg);
9920 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9921 cmp->sreg1 = sp [0]->dreg;
9922 type_from_op (cfg, cmp, sp [0], NULL);
9925 #if SIZEOF_REGISTER == 4
9926 if (cmp->opcode == OP_LCOMPARE_IMM) {
9927 /* Convert it to OP_LCOMPARE */
9928 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9929 ins->type = STACK_I8;
9930 ins->dreg = alloc_dreg (cfg, STACK_I8);
9932 MONO_ADD_INS (cfg->cbb, ins);
9933 cmp->opcode = OP_LCOMPARE;
9934 cmp->sreg2 = ins->dreg;
9937 MONO_ADD_INS (cfg->cbb, cmp);
9939 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9940 type_from_op (cfg, ins, sp [0], NULL);
9941 MONO_ADD_INS (cfg->cbb, ins);
9942 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9943 GET_BBLOCK (cfg, tblock, target);
9944 ins->inst_true_bb = tblock;
9945 GET_BBLOCK (cfg, tblock, ip);
9946 ins->inst_false_bb = tblock;
9947 start_new_bblock = 2;
9950 inline_costs += BRANCH_COST;
9965 MONO_INST_NEW (cfg, ins, *ip);
9967 target = ip + 4 + (gint32)read32(ip);
9973 inline_costs += BRANCH_COST;
9977 MonoBasicBlock **targets;
9978 MonoBasicBlock *default_bblock;
9979 MonoJumpInfoBBTable *table;
9980 int offset_reg = alloc_preg (cfg);
9981 int target_reg = alloc_preg (cfg);
9982 int table_reg = alloc_preg (cfg);
9983 int sum_reg = alloc_preg (cfg);
9984 gboolean use_op_switch;
9988 n = read32 (ip + 1);
9991 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9995 CHECK_OPSIZE (n * sizeof (guint32));
9996 target = ip + n * sizeof (guint32);
9998 GET_BBLOCK (cfg, default_bblock, target);
9999 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10001 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10002 for (i = 0; i < n; ++i) {
10003 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10004 targets [i] = tblock;
10005 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10009 if (sp != stack_start) {
10011 * Link the current bb with the targets as well, so handle_stack_args
10012 * will set their in_stack correctly.
10014 link_bblock (cfg, cfg->cbb, default_bblock);
10015 for (i = 0; i < n; ++i)
10016 link_bblock (cfg, cfg->cbb, targets [i]);
10018 handle_stack_args (cfg, stack_start, sp - stack_start);
10020 CHECK_UNVERIFIABLE (cfg);
10022 /* Undo the links */
10023 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10024 for (i = 0; i < n; ++i)
10025 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10029 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10031 for (i = 0; i < n; ++i)
10032 link_bblock (cfg, cfg->cbb, targets [i]);
10034 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10035 table->table = targets;
10036 table->table_size = n;
10038 use_op_switch = FALSE;
10040 /* ARM implements SWITCH statements differently */
10041 /* FIXME: Make it use the generic implementation */
10042 if (!cfg->compile_aot)
10043 use_op_switch = TRUE;
10046 if (COMPILE_LLVM (cfg))
10047 use_op_switch = TRUE;
10049 cfg->cbb->has_jump_table = 1;
10051 if (use_op_switch) {
10052 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10053 ins->sreg1 = src1->dreg;
10054 ins->inst_p0 = table;
10055 ins->inst_many_bb = targets;
10056 ins->klass = GUINT_TO_POINTER (n);
10057 MONO_ADD_INS (cfg->cbb, ins);
10059 if (sizeof (gpointer) == 8)
10060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10064 #if SIZEOF_REGISTER == 8
10065 /* The upper word might not be zero, and we add it to a 64 bit address later */
10066 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10069 if (cfg->compile_aot) {
10070 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10072 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10073 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10074 ins->inst_p0 = table;
10075 ins->dreg = table_reg;
10076 MONO_ADD_INS (cfg->cbb, ins);
10079 /* FIXME: Use load_memindex */
10080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10082 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10084 start_new_bblock = 1;
10085 inline_costs += (BRANCH_COST * 2);
10098 case CEE_LDIND_REF:
10105 dreg = alloc_freg (cfg);
10108 dreg = alloc_lreg (cfg);
10110 case CEE_LDIND_REF:
10111 dreg = alloc_ireg_ref (cfg);
10114 dreg = alloc_preg (cfg);
10117 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10118 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10119 if (*ip == CEE_LDIND_R4)
10120 ins->type = cfg->r4_stack_type;
10121 ins->flags |= ins_flag;
10122 MONO_ADD_INS (cfg->cbb, ins);
10124 if (ins_flag & MONO_INST_VOLATILE) {
10125 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10126 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10131 case CEE_STIND_REF:
10142 if (ins_flag & MONO_INST_VOLATILE) {
10143 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10144 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10147 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10148 ins->flags |= ins_flag;
10151 MONO_ADD_INS (cfg->cbb, ins);
10153 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10154 emit_write_barrier (cfg, sp [0], sp [1]);
10163 MONO_INST_NEW (cfg, ins, (*ip));
10165 ins->sreg1 = sp [0]->dreg;
10166 ins->sreg2 = sp [1]->dreg;
10167 type_from_op (cfg, ins, sp [0], sp [1]);
10169 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10171 /* Use the immediate opcodes if possible */
10172 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10173 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10174 if (imm_opcode != -1) {
10175 ins->opcode = imm_opcode;
10176 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10179 NULLIFY_INS (sp [1]);
10183 MONO_ADD_INS ((cfg)->cbb, (ins));
10185 *sp++ = mono_decompose_opcode (cfg, ins);
10202 MONO_INST_NEW (cfg, ins, (*ip));
10204 ins->sreg1 = sp [0]->dreg;
10205 ins->sreg2 = sp [1]->dreg;
10206 type_from_op (cfg, ins, sp [0], sp [1]);
10208 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10209 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10211 /* FIXME: Pass opcode to is_inst_imm */
10213 /* Use the immediate opcodes if possible */
10214 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10217 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10218 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10219 /* Keep emulated opcodes which are optimized away later */
10220 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10221 imm_opcode = mono_op_to_op_imm (ins->opcode);
10224 if (imm_opcode != -1) {
10225 ins->opcode = imm_opcode;
10226 if (sp [1]->opcode == OP_I8CONST) {
10227 #if SIZEOF_REGISTER == 8
10228 ins->inst_imm = sp [1]->inst_l;
10230 ins->inst_ls_word = sp [1]->inst_ls_word;
10231 ins->inst_ms_word = sp [1]->inst_ms_word;
10235 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10238 /* Might be followed by an instruction added by add_widen_op */
10239 if (sp [1]->next == NULL)
10240 NULLIFY_INS (sp [1]);
10243 MONO_ADD_INS ((cfg)->cbb, (ins));
10245 *sp++ = mono_decompose_opcode (cfg, ins);
10258 case CEE_CONV_OVF_I8:
10259 case CEE_CONV_OVF_U8:
10260 case CEE_CONV_R_UN:
10263 /* Special case this earlier so we have long constants in the IR */
10264 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10265 int data = sp [-1]->inst_c0;
10266 sp [-1]->opcode = OP_I8CONST;
10267 sp [-1]->type = STACK_I8;
10268 #if SIZEOF_REGISTER == 8
10269 if ((*ip) == CEE_CONV_U8)
10270 sp [-1]->inst_c0 = (guint32)data;
10272 sp [-1]->inst_c0 = data;
10274 sp [-1]->inst_ls_word = data;
10275 if ((*ip) == CEE_CONV_U8)
10276 sp [-1]->inst_ms_word = 0;
10278 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10280 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10287 case CEE_CONV_OVF_I4:
10288 case CEE_CONV_OVF_I1:
10289 case CEE_CONV_OVF_I2:
10290 case CEE_CONV_OVF_I:
10291 case CEE_CONV_OVF_U:
10294 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10295 ADD_UNOP (CEE_CONV_OVF_I8);
10302 case CEE_CONV_OVF_U1:
10303 case CEE_CONV_OVF_U2:
10304 case CEE_CONV_OVF_U4:
10307 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10308 ADD_UNOP (CEE_CONV_OVF_U8);
10315 case CEE_CONV_OVF_I1_UN:
10316 case CEE_CONV_OVF_I2_UN:
10317 case CEE_CONV_OVF_I4_UN:
10318 case CEE_CONV_OVF_I8_UN:
10319 case CEE_CONV_OVF_U1_UN:
10320 case CEE_CONV_OVF_U2_UN:
10321 case CEE_CONV_OVF_U4_UN:
10322 case CEE_CONV_OVF_U8_UN:
10323 case CEE_CONV_OVF_I_UN:
10324 case CEE_CONV_OVF_U_UN:
10331 CHECK_CFG_EXCEPTION;
10335 case CEE_ADD_OVF_UN:
10337 case CEE_MUL_OVF_UN:
10339 case CEE_SUB_OVF_UN:
10345 GSHAREDVT_FAILURE (*ip);
10348 token = read32 (ip + 1);
10349 klass = mini_get_class (method, token, generic_context);
10350 CHECK_TYPELOAD (klass);
10352 if (generic_class_is_reference_type (cfg, klass)) {
10353 MonoInst *store, *load;
10354 int dreg = alloc_ireg_ref (cfg);
10356 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10357 load->flags |= ins_flag;
10358 MONO_ADD_INS (cfg->cbb, load);
10360 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10361 store->flags |= ins_flag;
10362 MONO_ADD_INS (cfg->cbb, store);
10364 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10365 emit_write_barrier (cfg, sp [0], sp [1]);
10367 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10373 int loc_index = -1;
10379 token = read32 (ip + 1);
10380 klass = mini_get_class (method, token, generic_context);
10381 CHECK_TYPELOAD (klass);
10383 /* Optimize the common ldobj+stloc combination */
10386 loc_index = ip [6];
10393 loc_index = ip [5] - CEE_STLOC_0;
10400 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10401 CHECK_LOCAL (loc_index);
10403 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10404 ins->dreg = cfg->locals [loc_index]->dreg;
10405 ins->flags |= ins_flag;
10408 if (ins_flag & MONO_INST_VOLATILE) {
10409 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10410 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10416 /* Optimize the ldobj+stobj combination */
10417 /* The reference case ends up being a load+store anyway */
10418 /* Skip this if the operation is volatile. */
10419 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10424 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10431 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10432 ins->flags |= ins_flag;
10435 if (ins_flag & MONO_INST_VOLATILE) {
10436 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10437 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10446 CHECK_STACK_OVF (1);
10448 n = read32 (ip + 1);
10450 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10451 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10452 ins->type = STACK_OBJ;
10455 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10456 MonoInst *iargs [1];
10457 char *str = mono_method_get_wrapper_data (method, n);
10459 if (cfg->compile_aot)
10460 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10462 EMIT_NEW_PCONST (cfg, iargs [0], str);
10463 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10465 if (cfg->opt & MONO_OPT_SHARED) {
10466 MonoInst *iargs [3];
10468 if (cfg->compile_aot) {
10469 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10471 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10472 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10473 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10474 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10475 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10477 if (cfg->cbb->out_of_line) {
10478 MonoInst *iargs [2];
10480 if (image == mono_defaults.corlib) {
10482 * Avoid relocations in AOT and save some space by using a
10483 * version of helper_ldstr specialized to mscorlib.
10485 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10486 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10488 /* Avoid creating the string object */
10489 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10490 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10491 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10495 if (cfg->compile_aot) {
10496 NEW_LDSTRCONST (cfg, ins, image, n);
10498 MONO_ADD_INS (cfg->cbb, ins);
10501 NEW_PCONST (cfg, ins, NULL);
10502 ins->type = STACK_OBJ;
10503 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10505 OUT_OF_MEMORY_FAILURE;
10508 MONO_ADD_INS (cfg->cbb, ins);
10517 MonoInst *iargs [2];
10518 MonoMethodSignature *fsig;
10521 MonoInst *vtable_arg = NULL;
10524 token = read32 (ip + 1);
10525 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10526 if (!cmethod || mono_loader_get_last_error ())
10528 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10531 mono_save_token_info (cfg, image, token, cmethod);
10533 if (!mono_class_init (cmethod->klass))
10534 TYPE_LOAD_ERROR (cmethod->klass);
10536 context_used = mini_method_check_context_used (cfg, cmethod);
10538 if (mono_security_core_clr_enabled ())
10539 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10541 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10542 emit_class_init (cfg, cmethod->klass);
10543 CHECK_TYPELOAD (cmethod->klass);
10547 if (cfg->gsharedvt) {
10548 if (mini_is_gsharedvt_variable_signature (sig))
10549 GSHAREDVT_FAILURE (*ip);
10553 n = fsig->param_count;
10557 * Generate smaller code for the common newobj <exception> instruction in
10558 * argument checking code.
10560 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10561 is_exception_class (cmethod->klass) && n <= 2 &&
10562 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10563 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10564 MonoInst *iargs [3];
10568 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10571 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10574 iargs [1] = sp [0];
10575 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10578 iargs [1] = sp [0];
10579 iargs [2] = sp [1];
10580 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10583 g_assert_not_reached ();
10591 /* move the args to allow room for 'this' in the first position */
10597 /* check_call_signature () requires sp[0] to be set */
10598 this_ins.type = STACK_OBJ;
10599 sp [0] = &this_ins;
10600 if (check_call_signature (cfg, fsig, sp))
10605 if (mini_class_is_system_array (cmethod->klass)) {
10606 *sp = emit_get_rgctx_method (cfg, context_used,
10607 cmethod, MONO_RGCTX_INFO_METHOD);
10609 /* Avoid varargs in the common case */
10610 if (fsig->param_count == 1)
10611 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10612 else if (fsig->param_count == 2)
10613 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10614 else if (fsig->param_count == 3)
10615 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10616 else if (fsig->param_count == 4)
10617 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10619 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10620 } else if (cmethod->string_ctor) {
10621 g_assert (!context_used);
10622 g_assert (!vtable_arg);
10623 /* we simply pass a null pointer */
10624 EMIT_NEW_PCONST (cfg, *sp, NULL);
10625 /* now call the string ctor */
10626 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10628 if (cmethod->klass->valuetype) {
10629 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10630 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10631 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10636 * The code generated by mini_emit_virtual_call () expects
10637 * iargs [0] to be a boxed instance, but luckily the vcall
10638 * will be transformed into a normal call there.
10640 } else if (context_used) {
10641 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10644 MonoVTable *vtable = NULL;
10646 if (!cfg->compile_aot)
10647 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10648 CHECK_TYPELOAD (cmethod->klass);
10651 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10652 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10653 * As a workaround, we call class cctors before allocating objects.
10655 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10656 emit_class_init (cfg, cmethod->klass);
10657 if (cfg->verbose_level > 2)
10658 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10659 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10662 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10665 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10668 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10670 /* Now call the actual ctor */
10671 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10672 CHECK_CFG_EXCEPTION;
10675 if (alloc == NULL) {
10677 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10678 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10686 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10687 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10690 case CEE_CASTCLASS:
10694 token = read32 (ip + 1);
10695 klass = mini_get_class (method, token, generic_context);
10696 CHECK_TYPELOAD (klass);
10697 if (sp [0]->type != STACK_OBJ)
10700 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10701 CHECK_CFG_EXCEPTION;
10710 token = read32 (ip + 1);
10711 klass = mini_get_class (method, token, generic_context);
10712 CHECK_TYPELOAD (klass);
10713 if (sp [0]->type != STACK_OBJ)
10716 context_used = mini_class_check_context_used (cfg, klass);
10718 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10719 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10720 MonoInst *args [3];
10727 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10730 if (cfg->compile_aot) {
10731 idx = get_castclass_cache_idx (cfg);
10732 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10734 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10737 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10740 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10741 MonoMethod *mono_isinst;
10742 MonoInst *iargs [1];
10745 mono_isinst = mono_marshal_get_isinst (klass);
10746 iargs [0] = sp [0];
10748 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10749 iargs, ip, cfg->real_offset, TRUE);
10750 CHECK_CFG_EXCEPTION;
10751 g_assert (costs > 0);
10754 cfg->real_offset += 5;
10758 inline_costs += costs;
10761 ins = handle_isinst (cfg, klass, *sp, context_used);
10762 CHECK_CFG_EXCEPTION;
10768 case CEE_UNBOX_ANY: {
10769 MonoInst *res, *addr;
10774 token = read32 (ip + 1);
10775 klass = mini_get_class (method, token, generic_context);
10776 CHECK_TYPELOAD (klass);
10778 mono_save_token_info (cfg, image, token, klass);
10780 context_used = mini_class_check_context_used (cfg, klass);
10782 if (mini_is_gsharedvt_klass (klass)) {
10783 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10785 } else if (generic_class_is_reference_type (cfg, klass)) {
10786 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10787 CHECK_CFG_EXCEPTION;
10788 } else if (mono_class_is_nullable (klass)) {
10789 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10791 addr = handle_unbox (cfg, klass, sp, context_used);
10793 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10804 MonoClass *enum_class;
10805 MonoMethod *has_flag;
10811 token = read32 (ip + 1);
10812 klass = mini_get_class (method, token, generic_context);
10813 CHECK_TYPELOAD (klass);
10815 mono_save_token_info (cfg, image, token, klass);
10817 context_used = mini_class_check_context_used (cfg, klass);
10819 if (generic_class_is_reference_type (cfg, klass)) {
10825 if (klass == mono_defaults.void_class)
10827 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10829 /* frequent check in generic code: box (struct), brtrue */
10834 * <push int/long ptr>
10837 * constrained. MyFlags
10838 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10840 * If we find this sequence and the operand types on box and constrained
10841 * are equal, we can emit a specialized instruction sequence instead of
10842 * the very slow HasFlag () call.
10844 if ((cfg->opt & MONO_OPT_INTRINS) &&
10845 /* Cheap checks first. */
10846 ip + 5 + 6 + 5 < end &&
10847 ip [5] == CEE_PREFIX1 &&
10848 ip [6] == CEE_CONSTRAINED_ &&
10849 ip [11] == CEE_CALLVIRT &&
10850 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10851 mono_class_is_enum (klass) &&
10852 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10853 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10854 has_flag->klass == mono_defaults.enum_class &&
10855 !strcmp (has_flag->name, "HasFlag") &&
10856 has_flag->signature->hasthis &&
10857 has_flag->signature->param_count == 1) {
10858 CHECK_TYPELOAD (enum_class);
10860 if (enum_class == klass) {
10861 MonoInst *enum_this, *enum_flag;
10866 enum_this = sp [0];
10867 enum_flag = sp [1];
10869 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10874 // FIXME: LLVM can't handle the inconsistent bb linking
10875 if (!mono_class_is_nullable (klass) &&
10876 !mini_is_gsharedvt_klass (klass) &&
10877 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10878 (ip [5] == CEE_BRTRUE ||
10879 ip [5] == CEE_BRTRUE_S ||
10880 ip [5] == CEE_BRFALSE ||
10881 ip [5] == CEE_BRFALSE_S)) {
10882 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10884 MonoBasicBlock *true_bb, *false_bb;
10888 if (cfg->verbose_level > 3) {
10889 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10890 printf ("<box+brtrue opt>\n");
10895 case CEE_BRFALSE_S:
10898 target = ip + 1 + (signed char)(*ip);
10905 target = ip + 4 + (gint)(read32 (ip));
10909 g_assert_not_reached ();
10913 * We need to link both bblocks, since it is needed for handling stack
10914 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10915 * Branching to only one of them would lead to inconsistencies, so
10916 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10918 GET_BBLOCK (cfg, true_bb, target);
10919 GET_BBLOCK (cfg, false_bb, ip);
10921 mono_link_bblock (cfg, cfg->cbb, true_bb);
10922 mono_link_bblock (cfg, cfg->cbb, false_bb);
10924 if (sp != stack_start) {
10925 handle_stack_args (cfg, stack_start, sp - stack_start);
10927 CHECK_UNVERIFIABLE (cfg);
10930 if (COMPILE_LLVM (cfg)) {
10931 dreg = alloc_ireg (cfg);
10932 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10935 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10937 /* The JIT can't eliminate the iconst+compare */
10938 MONO_INST_NEW (cfg, ins, OP_BR);
10939 ins->inst_target_bb = is_true ? true_bb : false_bb;
10940 MONO_ADD_INS (cfg->cbb, ins);
10943 start_new_bblock = 1;
10947 *sp++ = handle_box (cfg, val, klass, context_used);
10949 CHECK_CFG_EXCEPTION;
10958 token = read32 (ip + 1);
10959 klass = mini_get_class (method, token, generic_context);
10960 CHECK_TYPELOAD (klass);
10962 mono_save_token_info (cfg, image, token, klass);
10964 context_used = mini_class_check_context_used (cfg, klass);
10966 if (mono_class_is_nullable (klass)) {
10969 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10970 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10974 ins = handle_unbox (cfg, klass, sp, context_used);
10987 MonoClassField *field;
10988 #ifndef DISABLE_REMOTING
10992 gboolean is_instance;
10994 gpointer addr = NULL;
10995 gboolean is_special_static;
10997 MonoInst *store_val = NULL;
10998 MonoInst *thread_ins;
11001 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11003 if (op == CEE_STFLD) {
11006 store_val = sp [1];
11011 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11013 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11016 if (op == CEE_STSFLD) {
11019 store_val = sp [0];
11024 token = read32 (ip + 1);
11025 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11026 field = mono_method_get_wrapper_data (method, token);
11027 klass = field->parent;
11030 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11033 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11034 FIELD_ACCESS_FAILURE (method, field);
11035 mono_class_init (klass);
11037 /* if the class is Critical then transparent code cannot access it's fields */
11038 if (!is_instance && mono_security_core_clr_enabled ())
11039 ensure_method_is_allowed_to_access_field (cfg, method, field);
11041 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11042 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11043 if (mono_security_core_clr_enabled ())
11044 ensure_method_is_allowed_to_access_field (cfg, method, field);
11047 ftype = mono_field_get_type (field);
11050 * LDFLD etc. is usable on static fields as well, so convert those cases to
11053 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11065 g_assert_not_reached ();
11067 is_instance = FALSE;
11070 context_used = mini_class_check_context_used (cfg, klass);
11072 /* INSTANCE CASE */
11074 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11075 if (op == CEE_STFLD) {
11076 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11078 #ifndef DISABLE_REMOTING
11079 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11080 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11081 MonoInst *iargs [5];
11083 GSHAREDVT_FAILURE (op);
11085 iargs [0] = sp [0];
11086 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11087 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11088 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11090 iargs [4] = sp [1];
11092 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11093 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11094 iargs, ip, cfg->real_offset, TRUE);
11095 CHECK_CFG_EXCEPTION;
11096 g_assert (costs > 0);
11098 cfg->real_offset += 5;
11100 inline_costs += costs;
11102 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11109 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11111 if (mini_is_gsharedvt_klass (klass)) {
11112 MonoInst *offset_ins;
11114 context_used = mini_class_check_context_used (cfg, klass);
11116 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11117 dreg = alloc_ireg_mp (cfg);
11118 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11119 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11120 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11122 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11124 if (sp [0]->opcode != OP_LDADDR)
11125 store->flags |= MONO_INST_FAULT;
11127 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11128 /* insert call to write barrier */
11132 dreg = alloc_ireg_mp (cfg);
11133 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11134 emit_write_barrier (cfg, ptr, sp [1]);
11137 store->flags |= ins_flag;
11144 #ifndef DISABLE_REMOTING
11145 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11146 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11147 MonoInst *iargs [4];
11149 GSHAREDVT_FAILURE (op);
11151 iargs [0] = sp [0];
11152 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11153 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11154 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11155 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11156 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11157 iargs, ip, cfg->real_offset, TRUE);
11158 CHECK_CFG_EXCEPTION;
11159 g_assert (costs > 0);
11161 cfg->real_offset += 5;
11165 inline_costs += costs;
11167 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11173 if (sp [0]->type == STACK_VTYPE) {
11176 /* Have to compute the address of the variable */
11178 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11180 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11182 g_assert (var->klass == klass);
11184 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11188 if (op == CEE_LDFLDA) {
11189 if (sp [0]->type == STACK_OBJ) {
11190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11191 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11194 dreg = alloc_ireg_mp (cfg);
11196 if (mini_is_gsharedvt_klass (klass)) {
11197 MonoInst *offset_ins;
11199 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11200 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11202 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11204 ins->klass = mono_class_from_mono_type (field->type);
11205 ins->type = STACK_MP;
11210 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11212 if (mini_is_gsharedvt_klass (klass)) {
11213 MonoInst *offset_ins;
11215 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11216 dreg = alloc_ireg_mp (cfg);
11217 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11218 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11220 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11222 load->flags |= ins_flag;
11223 if (sp [0]->opcode != OP_LDADDR)
11224 load->flags |= MONO_INST_FAULT;
11236 context_used = mini_class_check_context_used (cfg, klass);
11238 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11241 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11242 * to be called here.
11244 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11245 mono_class_vtable (cfg->domain, klass);
11246 CHECK_TYPELOAD (klass);
11248 mono_domain_lock (cfg->domain);
11249 if (cfg->domain->special_static_fields)
11250 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11251 mono_domain_unlock (cfg->domain);
11253 is_special_static = mono_class_field_is_special_static (field);
11255 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11256 thread_ins = mono_get_thread_intrinsic (cfg);
11260 /* Generate IR to compute the field address */
11261 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11263 * Fast access to TLS data
11264 * Inline version of get_thread_static_data () in
11268 int idx, static_data_reg, array_reg, dreg;
11270 GSHAREDVT_FAILURE (op);
11272 MONO_ADD_INS (cfg->cbb, thread_ins);
11273 static_data_reg = alloc_ireg (cfg);
11274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11276 if (cfg->compile_aot) {
11277 int offset_reg, offset2_reg, idx_reg;
11279 /* For TLS variables, this will return the TLS offset */
11280 EMIT_NEW_SFLDACONST (cfg, ins, field);
11281 offset_reg = ins->dreg;
11282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11283 idx_reg = alloc_ireg (cfg);
11284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11286 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11287 array_reg = alloc_ireg (cfg);
11288 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11289 offset2_reg = alloc_ireg (cfg);
11290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11291 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11292 dreg = alloc_ireg (cfg);
11293 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11295 offset = (gsize)addr & 0x7fffffff;
11296 idx = offset & 0x3f;
11298 array_reg = alloc_ireg (cfg);
11299 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11300 dreg = alloc_ireg (cfg);
11301 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11303 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11304 (cfg->compile_aot && is_special_static) ||
11305 (context_used && is_special_static)) {
11306 MonoInst *iargs [2];
11308 g_assert (field->parent);
11309 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11310 if (context_used) {
11311 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11312 field, MONO_RGCTX_INFO_CLASS_FIELD);
11314 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11316 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11317 } else if (context_used) {
11318 MonoInst *static_data;
11321 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11322 method->klass->name_space, method->klass->name, method->name,
11323 depth, field->offset);
11326 if (mono_class_needs_cctor_run (klass, method))
11327 emit_class_init (cfg, klass);
11330 * The pointer we're computing here is
11332 * super_info.static_data + field->offset
11334 static_data = emit_get_rgctx_klass (cfg, context_used,
11335 klass, MONO_RGCTX_INFO_STATIC_DATA);
11337 if (mini_is_gsharedvt_klass (klass)) {
11338 MonoInst *offset_ins;
11340 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11341 dreg = alloc_ireg_mp (cfg);
11342 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11343 } else if (field->offset == 0) {
11346 int addr_reg = mono_alloc_preg (cfg);
11347 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11349 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11350 MonoInst *iargs [2];
11352 g_assert (field->parent);
11353 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11354 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11355 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11357 MonoVTable *vtable = NULL;
11359 if (!cfg->compile_aot)
11360 vtable = mono_class_vtable (cfg->domain, klass);
11361 CHECK_TYPELOAD (klass);
11364 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11365 if (!(g_slist_find (class_inits, klass))) {
11366 emit_class_init (cfg, klass);
11367 if (cfg->verbose_level > 2)
11368 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11369 class_inits = g_slist_prepend (class_inits, klass);
11372 if (cfg->run_cctors) {
11374 /* This makes it so that inlining cannot trigger */
11375 /* .cctors: too many apps depend on them */
11376 /* running with a specific order... */
11378 if (! vtable->initialized)
11379 INLINE_FAILURE ("class init");
11380 ex = mono_runtime_class_init_full (vtable, FALSE);
11382 set_exception_object (cfg, ex);
11383 goto exception_exit;
11387 if (cfg->compile_aot)
11388 EMIT_NEW_SFLDACONST (cfg, ins, field);
11391 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11393 EMIT_NEW_PCONST (cfg, ins, addr);
11396 MonoInst *iargs [1];
11397 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11398 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11402 /* Generate IR to do the actual load/store operation */
11404 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11405 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11406 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11409 if (op == CEE_LDSFLDA) {
11410 ins->klass = mono_class_from_mono_type (ftype);
11411 ins->type = STACK_PTR;
11413 } else if (op == CEE_STSFLD) {
11416 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11417 store->flags |= ins_flag;
11419 gboolean is_const = FALSE;
11420 MonoVTable *vtable = NULL;
11421 gpointer addr = NULL;
11423 if (!context_used) {
11424 vtable = mono_class_vtable (cfg->domain, klass);
11425 CHECK_TYPELOAD (klass);
11427 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11428 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11429 int ro_type = ftype->type;
11431 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11432 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11433 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11436 GSHAREDVT_FAILURE (op);
11438 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11441 case MONO_TYPE_BOOLEAN:
11443 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11447 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11450 case MONO_TYPE_CHAR:
11452 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11456 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11461 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11465 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11470 case MONO_TYPE_PTR:
11471 case MONO_TYPE_FNPTR:
11472 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11473 type_to_eval_stack_type ((cfg), field->type, *sp);
11476 case MONO_TYPE_STRING:
11477 case MONO_TYPE_OBJECT:
11478 case MONO_TYPE_CLASS:
11479 case MONO_TYPE_SZARRAY:
11480 case MONO_TYPE_ARRAY:
11481 if (!mono_gc_is_moving ()) {
11482 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11483 type_to_eval_stack_type ((cfg), field->type, *sp);
11491 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11496 case MONO_TYPE_VALUETYPE:
11506 CHECK_STACK_OVF (1);
11508 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11509 load->flags |= ins_flag;
11515 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11516 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11517 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11528 token = read32 (ip + 1);
11529 klass = mini_get_class (method, token, generic_context);
11530 CHECK_TYPELOAD (klass);
11531 if (ins_flag & MONO_INST_VOLATILE) {
11532 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11533 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11535 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11536 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11537 ins->flags |= ins_flag;
11538 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11539 generic_class_is_reference_type (cfg, klass)) {
11540 /* insert call to write barrier */
11541 emit_write_barrier (cfg, sp [0], sp [1]);
11553 const char *data_ptr;
11555 guint32 field_token;
11561 token = read32 (ip + 1);
11563 klass = mini_get_class (method, token, generic_context);
11564 CHECK_TYPELOAD (klass);
11566 context_used = mini_class_check_context_used (cfg, klass);
11568 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11569 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11570 ins->sreg1 = sp [0]->dreg;
11571 ins->type = STACK_I4;
11572 ins->dreg = alloc_ireg (cfg);
11573 MONO_ADD_INS (cfg->cbb, ins);
11574 *sp = mono_decompose_opcode (cfg, ins);
11577 if (context_used) {
11578 MonoInst *args [3];
11579 MonoClass *array_class = mono_array_class_get (klass, 1);
11580 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11582 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11585 args [0] = emit_get_rgctx_klass (cfg, context_used,
11586 array_class, MONO_RGCTX_INFO_VTABLE);
11591 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11593 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11595 if (cfg->opt & MONO_OPT_SHARED) {
11596 /* Decompose now to avoid problems with references to the domainvar */
11597 MonoInst *iargs [3];
11599 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11600 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11601 iargs [2] = sp [0];
11603 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11605 /* Decompose later since it is needed by abcrem */
11606 MonoClass *array_type = mono_array_class_get (klass, 1);
11607 mono_class_vtable (cfg->domain, array_type);
11608 CHECK_TYPELOAD (array_type);
11610 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11611 ins->dreg = alloc_ireg_ref (cfg);
11612 ins->sreg1 = sp [0]->dreg;
11613 ins->inst_newa_class = klass;
11614 ins->type = STACK_OBJ;
11615 ins->klass = array_type;
11616 MONO_ADD_INS (cfg->cbb, ins);
11617 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11618 cfg->cbb->has_array_access = TRUE;
11620 /* Needed so mono_emit_load_get_addr () gets called */
11621 mono_get_got_var (cfg);
11631 * we inline/optimize the initialization sequence if possible.
11632 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11633 * for small sizes open code the memcpy
11634 * ensure the rva field is big enough
11636 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11637 MonoMethod *memcpy_method = get_memcpy_method ();
11638 MonoInst *iargs [3];
11639 int add_reg = alloc_ireg_mp (cfg);
11641 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11642 if (cfg->compile_aot) {
11643 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11645 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11647 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11648 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11657 if (sp [0]->type != STACK_OBJ)
11660 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11661 ins->dreg = alloc_preg (cfg);
11662 ins->sreg1 = sp [0]->dreg;
11663 ins->type = STACK_I4;
11664 /* This flag will be inherited by the decomposition */
11665 ins->flags |= MONO_INST_FAULT;
11666 MONO_ADD_INS (cfg->cbb, ins);
11667 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11668 cfg->cbb->has_array_access = TRUE;
11676 if (sp [0]->type != STACK_OBJ)
11679 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11681 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11682 CHECK_TYPELOAD (klass);
11683 /* we need to make sure that this array is exactly the type it needs
11684 * to be for correctness. the wrappers are lax with their usage
11685 * so we need to ignore them here
11687 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11688 MonoClass *array_class = mono_array_class_get (klass, 1);
11689 mini_emit_check_array_type (cfg, sp [0], array_class);
11690 CHECK_TYPELOAD (array_class);
11694 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11699 case CEE_LDELEM_I1:
11700 case CEE_LDELEM_U1:
11701 case CEE_LDELEM_I2:
11702 case CEE_LDELEM_U2:
11703 case CEE_LDELEM_I4:
11704 case CEE_LDELEM_U4:
11705 case CEE_LDELEM_I8:
11707 case CEE_LDELEM_R4:
11708 case CEE_LDELEM_R8:
11709 case CEE_LDELEM_REF: {
11715 if (*ip == CEE_LDELEM) {
11717 token = read32 (ip + 1);
11718 klass = mini_get_class (method, token, generic_context);
11719 CHECK_TYPELOAD (klass);
11720 mono_class_init (klass);
11723 klass = array_access_to_klass (*ip);
11725 if (sp [0]->type != STACK_OBJ)
11728 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11730 if (mini_is_gsharedvt_variable_klass (klass)) {
11731 // FIXME-VT: OP_ICONST optimization
11732 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11734 ins->opcode = OP_LOADV_MEMBASE;
11735 } else if (sp [1]->opcode == OP_ICONST) {
11736 int array_reg = sp [0]->dreg;
11737 int index_reg = sp [1]->dreg;
11738 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11740 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11741 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11743 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11747 if (*ip == CEE_LDELEM)
11754 case CEE_STELEM_I1:
11755 case CEE_STELEM_I2:
11756 case CEE_STELEM_I4:
11757 case CEE_STELEM_I8:
11758 case CEE_STELEM_R4:
11759 case CEE_STELEM_R8:
11760 case CEE_STELEM_REF:
11765 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11767 if (*ip == CEE_STELEM) {
11769 token = read32 (ip + 1);
11770 klass = mini_get_class (method, token, generic_context);
11771 CHECK_TYPELOAD (klass);
11772 mono_class_init (klass);
11775 klass = array_access_to_klass (*ip);
11777 if (sp [0]->type != STACK_OBJ)
11780 emit_array_store (cfg, klass, sp, TRUE);
11782 if (*ip == CEE_STELEM)
11789 case CEE_CKFINITE: {
11793 if (cfg->llvm_only) {
11794 MonoInst *iargs [1];
11796 iargs [0] = sp [0];
11797 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11799 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11800 ins->sreg1 = sp [0]->dreg;
11801 ins->dreg = alloc_freg (cfg);
11802 ins->type = STACK_R8;
11803 MONO_ADD_INS (cfg->cbb, ins);
11805 *sp++ = mono_decompose_opcode (cfg, ins);
11811 case CEE_REFANYVAL: {
11812 MonoInst *src_var, *src;
11814 int klass_reg = alloc_preg (cfg);
11815 int dreg = alloc_preg (cfg);
11817 GSHAREDVT_FAILURE (*ip);
11820 MONO_INST_NEW (cfg, ins, *ip);
11823 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11824 CHECK_TYPELOAD (klass);
11826 context_used = mini_class_check_context_used (cfg, klass);
11829 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11831 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11832 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11833 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11835 if (context_used) {
11836 MonoInst *klass_ins;
11838 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11839 klass, MONO_RGCTX_INFO_KLASS);
11842 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11843 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11845 mini_emit_class_check (cfg, klass_reg, klass);
11847 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11848 ins->type = STACK_MP;
11849 ins->klass = klass;
11854 case CEE_MKREFANY: {
11855 MonoInst *loc, *addr;
11857 GSHAREDVT_FAILURE (*ip);
11860 MONO_INST_NEW (cfg, ins, *ip);
11863 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11864 CHECK_TYPELOAD (klass);
11866 context_used = mini_class_check_context_used (cfg, klass);
11868 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11869 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11871 if (context_used) {
11872 MonoInst *const_ins;
11873 int type_reg = alloc_preg (cfg);
11875 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11879 } else if (cfg->compile_aot) {
11880 int const_reg = alloc_preg (cfg);
11881 int type_reg = alloc_preg (cfg);
11883 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11888 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11889 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11893 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11894 ins->type = STACK_VTYPE;
11895 ins->klass = mono_defaults.typed_reference_class;
11900 case CEE_LDTOKEN: {
11902 MonoClass *handle_class;
11904 CHECK_STACK_OVF (1);
11907 n = read32 (ip + 1);
11909 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11910 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11911 handle = mono_method_get_wrapper_data (method, n);
11912 handle_class = mono_method_get_wrapper_data (method, n + 1);
11913 if (handle_class == mono_defaults.typehandle_class)
11914 handle = &((MonoClass*)handle)->byval_arg;
11917 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11922 mono_class_init (handle_class);
11923 if (cfg->gshared) {
11924 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11925 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11926 /* This case handles ldtoken
11927 of an open type, like for
11930 } else if (handle_class == mono_defaults.typehandle_class) {
11931 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11932 } else if (handle_class == mono_defaults.fieldhandle_class)
11933 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11934 else if (handle_class == mono_defaults.methodhandle_class)
11935 context_used = mini_method_check_context_used (cfg, handle);
11937 g_assert_not_reached ();
11940 if ((cfg->opt & MONO_OPT_SHARED) &&
11941 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11942 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11943 MonoInst *addr, *vtvar, *iargs [3];
11944 int method_context_used;
11946 method_context_used = mini_method_check_context_used (cfg, method);
11948 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11950 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11951 EMIT_NEW_ICONST (cfg, iargs [1], n);
11952 if (method_context_used) {
11953 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11954 method, MONO_RGCTX_INFO_METHOD);
11955 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11957 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11958 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11960 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11964 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11966 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11967 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11968 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11969 (cmethod->klass == mono_defaults.systemtype_class) &&
11970 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11971 MonoClass *tclass = mono_class_from_mono_type (handle);
11973 mono_class_init (tclass);
11974 if (context_used) {
11975 ins = emit_get_rgctx_klass (cfg, context_used,
11976 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11977 } else if (cfg->compile_aot) {
11978 if (method->wrapper_type) {
11979 mono_error_init (&error); //got to do it since there are multiple conditionals below
11980 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11981 /* Special case for static synchronized wrappers */
11982 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11984 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11985 /* FIXME: n is not a normal token */
11987 EMIT_NEW_PCONST (cfg, ins, NULL);
11990 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11993 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11995 ins->type = STACK_OBJ;
11996 ins->klass = cmethod->klass;
11999 MonoInst *addr, *vtvar;
12001 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12003 if (context_used) {
12004 if (handle_class == mono_defaults.typehandle_class) {
12005 ins = emit_get_rgctx_klass (cfg, context_used,
12006 mono_class_from_mono_type (handle),
12007 MONO_RGCTX_INFO_TYPE);
12008 } else if (handle_class == mono_defaults.methodhandle_class) {
12009 ins = emit_get_rgctx_method (cfg, context_used,
12010 handle, MONO_RGCTX_INFO_METHOD);
12011 } else if (handle_class == mono_defaults.fieldhandle_class) {
12012 ins = emit_get_rgctx_field (cfg, context_used,
12013 handle, MONO_RGCTX_INFO_CLASS_FIELD);
12015 g_assert_not_reached ();
12017 } else if (cfg->compile_aot) {
12018 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12020 EMIT_NEW_PCONST (cfg, ins, handle);
12022 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12023 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12024 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12034 MONO_INST_NEW (cfg, ins, OP_THROW);
12036 ins->sreg1 = sp [0]->dreg;
12038 cfg->cbb->out_of_line = TRUE;
12039 MONO_ADD_INS (cfg->cbb, ins);
12040 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12041 MONO_ADD_INS (cfg->cbb, ins);
12044 link_bblock (cfg, cfg->cbb, end_bblock);
12045 start_new_bblock = 1;
12046 /* This can complicate code generation for llvm since the return value might not be defined */
12047 if (COMPILE_LLVM (cfg))
12048 INLINE_FAILURE ("throw");
12050 case CEE_ENDFINALLY:
12051 /* mono_save_seq_point_info () depends on this */
12052 if (sp != stack_start)
12053 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12054 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12055 MONO_ADD_INS (cfg->cbb, ins);
12057 start_new_bblock = 1;
12060 * Control will leave the method so empty the stack, otherwise
12061 * the next basic block will start with a nonempty stack.
12063 while (sp != stack_start) {
12068 case CEE_LEAVE_S: {
12071 if (*ip == CEE_LEAVE) {
12073 target = ip + 5 + (gint32)read32(ip + 1);
12076 target = ip + 2 + (signed char)(ip [1]);
12079 /* empty the stack */
12080 while (sp != stack_start) {
12085 * If this leave statement is in a catch block, check for a
12086 * pending exception, and rethrow it if necessary.
12087 * We avoid doing this in runtime invoke wrappers, since those are called
12088 * by native code which expects the wrapper to catch all exceptions.
12090 for (i = 0; i < header->num_clauses; ++i) {
12091 MonoExceptionClause *clause = &header->clauses [i];
12094 * Use <= in the final comparison to handle clauses with multiple
12095 * leave statements, like in bug #78024.
12096 * The ordering of the exception clauses guarantees that we find the
12097 * innermost clause.
12099 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12101 MonoBasicBlock *dont_throw;
12106 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12109 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12111 NEW_BBLOCK (cfg, dont_throw);
12114 * Currently, we always rethrow the abort exception, despite the
12115 * fact that this is not correct. See thread6.cs for an example.
12116 * But propagating the abort exception is more important than
12117 * getting the semantics right.
12119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12121 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12123 MONO_START_BB (cfg, dont_throw);
12128 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12131 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12133 MonoExceptionClause *clause;
12135 for (tmp = handlers; tmp; tmp = tmp->next) {
12136 clause = tmp->data;
12137 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12139 link_bblock (cfg, cfg->cbb, tblock);
12140 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12141 ins->inst_target_bb = tblock;
12142 ins->inst_eh_block = clause;
12143 MONO_ADD_INS (cfg->cbb, ins);
12144 cfg->cbb->has_call_handler = 1;
12145 if (COMPILE_LLVM (cfg)) {
12146 MonoBasicBlock *target_bb;
12149 * Link the finally bblock with the target, since it will
12150 * conceptually branch there.
12151 * FIXME: Have to link the bblock containing the endfinally.
12153 GET_BBLOCK (cfg, target_bb, target);
12154 link_bblock (cfg, tblock, target_bb);
12157 g_list_free (handlers);
12160 MONO_INST_NEW (cfg, ins, OP_BR);
12161 MONO_ADD_INS (cfg->cbb, ins);
12162 GET_BBLOCK (cfg, tblock, target);
12163 link_bblock (cfg, cfg->cbb, tblock);
12164 ins->inst_target_bb = tblock;
12166 start_new_bblock = 1;
12168 if (*ip == CEE_LEAVE)
12177 * Mono specific opcodes
12179 case MONO_CUSTOM_PREFIX: {
12181 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12185 case CEE_MONO_ICALL: {
12187 MonoJitICallInfo *info;
12189 token = read32 (ip + 2);
12190 func = mono_method_get_wrapper_data (method, token);
12191 info = mono_find_jit_icall_by_addr (func);
12193 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12196 CHECK_STACK (info->sig->param_count);
12197 sp -= info->sig->param_count;
12199 ins = mono_emit_jit_icall (cfg, info->func, sp);
12200 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12204 inline_costs += 10 * num_calls++;
12208 case CEE_MONO_LDPTR_CARD_TABLE:
12209 case CEE_MONO_LDPTR_NURSERY_START:
12210 case CEE_MONO_LDPTR_NURSERY_BITS:
12211 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12212 CHECK_STACK_OVF (1);
12215 case CEE_MONO_LDPTR_CARD_TABLE:
12216 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
12218 case CEE_MONO_LDPTR_NURSERY_START:
12219 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_START);
12221 case CEE_MONO_LDPTR_NURSERY_BITS:
12222 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_BITS);
12224 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12225 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG);
12231 inline_costs += 10 * num_calls++;
12234 case CEE_MONO_LDPTR: {
12237 CHECK_STACK_OVF (1);
12239 token = read32 (ip + 2);
12241 ptr = mono_method_get_wrapper_data (method, token);
12242 EMIT_NEW_PCONST (cfg, ins, ptr);
12245 inline_costs += 10 * num_calls++;
12246 /* Can't embed random pointers into AOT code */
12250 case CEE_MONO_JIT_ICALL_ADDR: {
12251 MonoJitICallInfo *callinfo;
12254 CHECK_STACK_OVF (1);
12256 token = read32 (ip + 2);
12258 ptr = mono_method_get_wrapper_data (method, token);
12259 callinfo = mono_find_jit_icall_by_addr (ptr);
12260 g_assert (callinfo);
12261 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12264 inline_costs += 10 * num_calls++;
12267 case CEE_MONO_ICALL_ADDR: {
12268 MonoMethod *cmethod;
12271 CHECK_STACK_OVF (1);
12273 token = read32 (ip + 2);
12275 cmethod = mono_method_get_wrapper_data (method, token);
12277 if (cfg->compile_aot) {
12278 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12280 ptr = mono_lookup_internal_call (cmethod);
12282 EMIT_NEW_PCONST (cfg, ins, ptr);
12288 case CEE_MONO_VTADDR: {
12289 MonoInst *src_var, *src;
12295 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12296 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12301 case CEE_MONO_NEWOBJ: {
12302 MonoInst *iargs [2];
12304 CHECK_STACK_OVF (1);
12306 token = read32 (ip + 2);
12307 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12308 mono_class_init (klass);
12309 NEW_DOMAINCONST (cfg, iargs [0]);
12310 MONO_ADD_INS (cfg->cbb, iargs [0]);
12311 NEW_CLASSCONST (cfg, iargs [1], klass);
12312 MONO_ADD_INS (cfg->cbb, iargs [1]);
12313 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12315 inline_costs += 10 * num_calls++;
12318 case CEE_MONO_OBJADDR:
12321 MONO_INST_NEW (cfg, ins, OP_MOVE);
12322 ins->dreg = alloc_ireg_mp (cfg);
12323 ins->sreg1 = sp [0]->dreg;
12324 ins->type = STACK_MP;
12325 MONO_ADD_INS (cfg->cbb, ins);
12329 case CEE_MONO_LDNATIVEOBJ:
12331 * Similar to LDOBJ, but instead load the unmanaged
12332 * representation of the vtype to the stack.
12337 token = read32 (ip + 2);
12338 klass = mono_method_get_wrapper_data (method, token);
12339 g_assert (klass->valuetype);
12340 mono_class_init (klass);
12343 MonoInst *src, *dest, *temp;
12346 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12347 temp->backend.is_pinvoke = 1;
12348 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12349 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12351 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12352 dest->type = STACK_VTYPE;
12353 dest->klass = klass;
12359 case CEE_MONO_RETOBJ: {
12361 * Same as RET, but return the native representation of a vtype
12364 g_assert (cfg->ret);
12365 g_assert (mono_method_signature (method)->pinvoke);
12370 token = read32 (ip + 2);
12371 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12373 if (!cfg->vret_addr) {
12374 g_assert (cfg->ret_var_is_local);
12376 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12378 EMIT_NEW_RETLOADA (cfg, ins);
12380 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12382 if (sp != stack_start)
12385 MONO_INST_NEW (cfg, ins, OP_BR);
12386 ins->inst_target_bb = end_bblock;
12387 MONO_ADD_INS (cfg->cbb, ins);
12388 link_bblock (cfg, cfg->cbb, end_bblock);
12389 start_new_bblock = 1;
12393 case CEE_MONO_CISINST:
12394 case CEE_MONO_CCASTCLASS: {
12399 token = read32 (ip + 2);
12400 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12401 if (ip [1] == CEE_MONO_CISINST)
12402 ins = handle_cisinst (cfg, klass, sp [0]);
12404 ins = handle_ccastclass (cfg, klass, sp [0]);
12409 case CEE_MONO_SAVE_LMF:
12410 case CEE_MONO_RESTORE_LMF:
12413 case CEE_MONO_CLASSCONST:
12414 CHECK_STACK_OVF (1);
12416 token = read32 (ip + 2);
12417 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12420 inline_costs += 10 * num_calls++;
12422 case CEE_MONO_NOT_TAKEN:
12423 cfg->cbb->out_of_line = TRUE;
12426 case CEE_MONO_TLS: {
12429 CHECK_STACK_OVF (1);
12431 key = (gint32)read32 (ip + 2);
12432 g_assert (key < TLS_KEY_NUM);
12434 ins = mono_create_tls_get (cfg, key);
12436 if (cfg->compile_aot) {
12438 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12439 ins->dreg = alloc_preg (cfg);
12440 ins->type = STACK_PTR;
12442 g_assert_not_reached ();
12445 ins->type = STACK_PTR;
12446 MONO_ADD_INS (cfg->cbb, ins);
12451 case CEE_MONO_DYN_CALL: {
12452 MonoCallInst *call;
12454 /* It would be easier to call a trampoline, but that would put an
12455 * extra frame on the stack, confusing exception handling. So
12456 * implement it inline using an opcode for now.
12459 if (!cfg->dyn_call_var) {
12460 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12461 /* prevent it from being register allocated */
12462 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12465 /* Has to use a call inst since it local regalloc expects it */
12466 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12467 ins = (MonoInst*)call;
12469 ins->sreg1 = sp [0]->dreg;
12470 ins->sreg2 = sp [1]->dreg;
12471 MONO_ADD_INS (cfg->cbb, ins);
12473 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12476 inline_costs += 10 * num_calls++;
12480 case CEE_MONO_MEMORY_BARRIER: {
12482 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12486 case CEE_MONO_JIT_ATTACH: {
12487 MonoInst *args [16], *domain_ins;
12488 MonoInst *ad_ins, *jit_tls_ins;
12489 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12491 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12493 EMIT_NEW_PCONST (cfg, ins, NULL);
12494 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12496 ad_ins = mono_get_domain_intrinsic (cfg);
12497 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12499 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12500 NEW_BBLOCK (cfg, next_bb);
12501 NEW_BBLOCK (cfg, call_bb);
12503 if (cfg->compile_aot) {
12504 /* AOT code is only used in the root domain */
12505 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12507 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12509 MONO_ADD_INS (cfg->cbb, ad_ins);
12510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12513 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12518 MONO_START_BB (cfg, call_bb);
12521 if (cfg->compile_aot) {
12522 /* AOT code is only used in the root domain */
12523 EMIT_NEW_PCONST (cfg, args [0], NULL);
12525 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12527 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12528 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12531 MONO_START_BB (cfg, next_bb);
12535 case CEE_MONO_JIT_DETACH: {
12536 MonoInst *args [16];
12538 /* Restore the original domain */
12539 dreg = alloc_ireg (cfg);
12540 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12541 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12546 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12552 case CEE_PREFIX1: {
12555 case CEE_ARGLIST: {
12556 /* somewhat similar to LDTOKEN */
12557 MonoInst *addr, *vtvar;
12558 CHECK_STACK_OVF (1);
12559 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12561 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12562 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12564 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12565 ins->type = STACK_VTYPE;
12566 ins->klass = mono_defaults.argumenthandle_class;
12576 MonoInst *cmp, *arg1, *arg2;
12584 * The following transforms:
12585 * CEE_CEQ into OP_CEQ
12586 * CEE_CGT into OP_CGT
12587 * CEE_CGT_UN into OP_CGT_UN
12588 * CEE_CLT into OP_CLT
12589 * CEE_CLT_UN into OP_CLT_UN
12591 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12593 MONO_INST_NEW (cfg, ins, cmp->opcode);
12594 cmp->sreg1 = arg1->dreg;
12595 cmp->sreg2 = arg2->dreg;
12596 type_from_op (cfg, cmp, arg1, arg2);
12598 add_widen_op (cfg, cmp, &arg1, &arg2);
12599 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12600 cmp->opcode = OP_LCOMPARE;
12601 else if (arg1->type == STACK_R4)
12602 cmp->opcode = OP_RCOMPARE;
12603 else if (arg1->type == STACK_R8)
12604 cmp->opcode = OP_FCOMPARE;
12606 cmp->opcode = OP_ICOMPARE;
12607 MONO_ADD_INS (cfg->cbb, cmp);
12608 ins->type = STACK_I4;
12609 ins->dreg = alloc_dreg (cfg, ins->type);
12610 type_from_op (cfg, ins, arg1, arg2);
12612 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12614 * The backends expect the fceq opcodes to do the
12617 ins->sreg1 = cmp->sreg1;
12618 ins->sreg2 = cmp->sreg2;
12621 MONO_ADD_INS (cfg->cbb, ins);
12627 MonoInst *argconst;
12628 MonoMethod *cil_method;
12630 CHECK_STACK_OVF (1);
12632 n = read32 (ip + 2);
12633 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12634 if (!cmethod || mono_loader_get_last_error ())
12636 mono_class_init (cmethod->klass);
12638 mono_save_token_info (cfg, image, n, cmethod);
12640 context_used = mini_method_check_context_used (cfg, cmethod);
12642 cil_method = cmethod;
12643 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12644 METHOD_ACCESS_FAILURE (method, cil_method);
12646 if (mono_security_core_clr_enabled ())
12647 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12650 * Optimize the common case of ldftn+delegate creation
12652 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12653 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12654 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12655 MonoInst *target_ins, *handle_ins;
12656 MonoMethod *invoke;
12657 int invoke_context_used;
12659 invoke = mono_get_delegate_invoke (ctor_method->klass);
12660 if (!invoke || !mono_method_signature (invoke))
12663 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12665 target_ins = sp [-1];
12667 if (mono_security_core_clr_enabled ())
12668 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12670 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12671 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12672 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12674 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12678 /* FIXME: SGEN support */
12679 if (invoke_context_used == 0) {
12681 if (cfg->verbose_level > 3)
12682 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12683 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12686 CHECK_CFG_EXCEPTION;
12696 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12697 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12701 inline_costs += 10 * num_calls++;
12704 case CEE_LDVIRTFTN: {
12705 MonoInst *args [2];
12709 n = read32 (ip + 2);
12710 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12711 if (!cmethod || mono_loader_get_last_error ())
12713 mono_class_init (cmethod->klass);
12715 context_used = mini_method_check_context_used (cfg, cmethod);
12717 if (mono_security_core_clr_enabled ())
12718 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12721 * Optimize the common case of ldvirtftn+delegate creation
12723 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12724 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12725 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12726 MonoInst *target_ins, *handle_ins;
12727 MonoMethod *invoke;
12728 int invoke_context_used;
12729 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12731 invoke = mono_get_delegate_invoke (ctor_method->klass);
12732 if (!invoke || !mono_method_signature (invoke))
12735 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12737 target_ins = sp [-1];
12739 if (mono_security_core_clr_enabled ())
12740 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12742 /* FIXME: SGEN support */
12743 if (invoke_context_used == 0 || cfg->llvm_only) {
12745 if (cfg->verbose_level > 3)
12746 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12747 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12750 CHECK_CFG_EXCEPTION;
12763 args [1] = emit_get_rgctx_method (cfg, context_used,
12764 cmethod, MONO_RGCTX_INFO_METHOD);
12767 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12769 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12772 inline_costs += 10 * num_calls++;
12776 CHECK_STACK_OVF (1);
12778 n = read16 (ip + 2);
12780 EMIT_NEW_ARGLOAD (cfg, ins, n);
12785 CHECK_STACK_OVF (1);
12787 n = read16 (ip + 2);
12789 NEW_ARGLOADA (cfg, ins, n);
12790 MONO_ADD_INS (cfg->cbb, ins);
12798 n = read16 (ip + 2);
12800 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12802 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12806 CHECK_STACK_OVF (1);
12808 n = read16 (ip + 2);
12810 EMIT_NEW_LOCLOAD (cfg, ins, n);
12815 unsigned char *tmp_ip;
12816 CHECK_STACK_OVF (1);
12818 n = read16 (ip + 2);
12821 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12827 EMIT_NEW_LOCLOADA (cfg, ins, n);
12836 n = read16 (ip + 2);
12838 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12840 emit_stloc_ir (cfg, sp, header, n);
12847 if (sp != stack_start)
12849 if (cfg->method != method)
12851 * Inlining this into a loop in a parent could lead to
12852 * stack overflows which is different behavior than the
12853 * non-inlined case, thus disable inlining in this case.
12855 INLINE_FAILURE("localloc");
12857 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12858 ins->dreg = alloc_preg (cfg);
12859 ins->sreg1 = sp [0]->dreg;
12860 ins->type = STACK_PTR;
12861 MONO_ADD_INS (cfg->cbb, ins);
12863 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12865 ins->flags |= MONO_INST_INIT;
12870 case CEE_ENDFILTER: {
12871 MonoExceptionClause *clause, *nearest;
12876 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12878 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12879 ins->sreg1 = (*sp)->dreg;
12880 MONO_ADD_INS (cfg->cbb, ins);
12881 start_new_bblock = 1;
12885 for (cc = 0; cc < header->num_clauses; ++cc) {
12886 clause = &header->clauses [cc];
12887 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12888 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12889 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12892 g_assert (nearest);
12893 if ((ip - header->code) != nearest->handler_offset)
12898 case CEE_UNALIGNED_:
12899 ins_flag |= MONO_INST_UNALIGNED;
12900 /* FIXME: record alignment? we can assume 1 for now */
12904 case CEE_VOLATILE_:
12905 ins_flag |= MONO_INST_VOLATILE;
12909 ins_flag |= MONO_INST_TAILCALL;
12910 cfg->flags |= MONO_CFG_HAS_TAIL;
12911 /* Can't inline tail calls at this time */
12912 inline_costs += 100000;
12919 token = read32 (ip + 2);
12920 klass = mini_get_class (method, token, generic_context);
12921 CHECK_TYPELOAD (klass);
12922 if (generic_class_is_reference_type (cfg, klass))
12923 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12925 mini_emit_initobj (cfg, *sp, NULL, klass);
12929 case CEE_CONSTRAINED_:
12931 token = read32 (ip + 2);
12932 constrained_class = mini_get_class (method, token, generic_context);
12933 CHECK_TYPELOAD (constrained_class);
12937 case CEE_INITBLK: {
12938 MonoInst *iargs [3];
12942 /* Skip optimized paths for volatile operations. */
12943 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12944 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12945 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12946 /* emit_memset only works when val == 0 */
12947 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12950 iargs [0] = sp [0];
12951 iargs [1] = sp [1];
12952 iargs [2] = sp [2];
12953 if (ip [1] == CEE_CPBLK) {
12955 * FIXME: It's unclear whether we should be emitting both the acquire
12956 * and release barriers for cpblk. It is technically both a load and
12957 * store operation, so it seems like that's the sensible thing to do.
12959 * FIXME: We emit full barriers on both sides of the operation for
12960 * simplicity. We should have a separate atomic memcpy method instead.
12962 MonoMethod *memcpy_method = get_memcpy_method ();
12964 if (ins_flag & MONO_INST_VOLATILE)
12965 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12967 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12968 call->flags |= ins_flag;
12970 if (ins_flag & MONO_INST_VOLATILE)
12971 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12973 MonoMethod *memset_method = get_memset_method ();
12974 if (ins_flag & MONO_INST_VOLATILE) {
12975 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12976 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12978 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12979 call->flags |= ins_flag;
12990 ins_flag |= MONO_INST_NOTYPECHECK;
12992 ins_flag |= MONO_INST_NORANGECHECK;
12993 /* we ignore the no-nullcheck for now since we
12994 * really do it explicitly only when doing callvirt->call
12998 case CEE_RETHROW: {
13000 int handler_offset = -1;
13002 for (i = 0; i < header->num_clauses; ++i) {
13003 MonoExceptionClause *clause = &header->clauses [i];
13004 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13005 handler_offset = clause->handler_offset;
13010 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13012 if (handler_offset == -1)
13015 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13016 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13017 ins->sreg1 = load->dreg;
13018 MONO_ADD_INS (cfg->cbb, ins);
13020 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13021 MONO_ADD_INS (cfg->cbb, ins);
13024 link_bblock (cfg, cfg->cbb, end_bblock);
13025 start_new_bblock = 1;
13033 CHECK_STACK_OVF (1);
13035 token = read32 (ip + 2);
13036 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13037 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13040 val = mono_type_size (type, &ialign);
13042 MonoClass *klass = mini_get_class (method, token, generic_context);
13043 CHECK_TYPELOAD (klass);
13045 val = mono_type_size (&klass->byval_arg, &ialign);
13047 if (mini_is_gsharedvt_klass (klass))
13048 GSHAREDVT_FAILURE (*ip);
13050 EMIT_NEW_ICONST (cfg, ins, val);
13055 case CEE_REFANYTYPE: {
13056 MonoInst *src_var, *src;
13058 GSHAREDVT_FAILURE (*ip);
13064 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13066 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13067 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13068 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13073 case CEE_READONLY_:
13086 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13096 g_warning ("opcode 0x%02x not handled", *ip);
13100 if (start_new_bblock != 1)
13103 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13104 if (cfg->cbb->next_bb) {
13105 /* This could already be set because of inlining, #693905 */
13106 MonoBasicBlock *bb = cfg->cbb;
13108 while (bb->next_bb)
13110 bb->next_bb = end_bblock;
13112 cfg->cbb->next_bb = end_bblock;
13115 if (cfg->method == method && cfg->domainvar) {
13117 MonoInst *get_domain;
13119 cfg->cbb = init_localsbb;
13121 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13122 MONO_ADD_INS (cfg->cbb, get_domain);
13124 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13126 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13127 MONO_ADD_INS (cfg->cbb, store);
13130 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13131 if (cfg->compile_aot)
13132 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13133 mono_get_got_var (cfg);
13136 if (cfg->method == method && cfg->got_var)
13137 mono_emit_load_got_addr (cfg);
13139 if (init_localsbb) {
13140 cfg->cbb = init_localsbb;
13142 for (i = 0; i < header->num_locals; ++i) {
13143 emit_init_local (cfg, i, header->locals [i], init_locals);
13147 if (cfg->init_ref_vars && cfg->method == method) {
13148 /* Emit initialization for ref vars */
13149 // FIXME: Avoid duplication initialization for IL locals.
13150 for (i = 0; i < cfg->num_varinfo; ++i) {
13151 MonoInst *ins = cfg->varinfo [i];
13153 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13154 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13158 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13159 cfg->cbb = init_localsbb;
13160 emit_push_lmf (cfg);
13163 cfg->cbb = init_localsbb;
13164 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13167 MonoBasicBlock *bb;
13170 * Make seq points at backward branch targets interruptable.
13172 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13173 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13174 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13177 /* Add a sequence point for method entry/exit events */
13178 if (seq_points && cfg->gen_sdb_seq_points) {
13179 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13180 MONO_ADD_INS (init_localsbb, ins);
13181 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13182 MONO_ADD_INS (cfg->bb_exit, ins);
13186 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13187 * the code they refer to was dead (#11880).
13189 if (sym_seq_points) {
13190 for (i = 0; i < header->code_size; ++i) {
13191 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13194 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13195 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13202 if (cfg->method == method) {
13203 MonoBasicBlock *bb;
13204 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13205 bb->region = mono_find_block_region (cfg, bb->real_offset);
13207 mono_create_spvar_for_region (cfg, bb->region);
13208 if (cfg->verbose_level > 2)
13209 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13213 if (inline_costs < 0) {
13216 /* Method is too large */
13217 mname = mono_method_full_name (method, TRUE);
13218 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13219 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13223 if ((cfg->verbose_level > 2) && (cfg->method == method))
13224 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13229 g_assert (!mono_error_ok (&cfg->error));
13233 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13237 set_exception_type_from_invalid_il (cfg, method, ip);
13241 g_slist_free (class_inits);
13242 mono_basic_block_free (original_bb);
13243 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13244 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13245 if (cfg->exception_type)
13248 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-register-to-memory opcode (*_MEMBASE_REG) onto its
 *   store-immediate-to-memory counterpart (*_MEMBASE_IMM), used when a
 *   stored value is known to be a constant.  Any opcode without an
 *   immediate form hits g_assert_not_reached ().
 *
 * NOTE(review): this chunk is garbled by extraction -- the return type,
 * braces and the switch header are missing; verify exact structure
 * against the upstream file before editing.
 */
13252 store_membase_reg_to_store_membase_imm (int opcode)
13255 case OP_STORE_MEMBASE_REG:
13256 return OP_STORE_MEMBASE_IMM;
13257 case OP_STOREI1_MEMBASE_REG:
13258 return OP_STOREI1_MEMBASE_IMM;
13259 case OP_STOREI2_MEMBASE_REG:
13260 return OP_STOREI2_MEMBASE_IMM;
13261 case OP_STOREI4_MEMBASE_REG:
13262 return OP_STOREI4_MEMBASE_IMM;
13263 case OP_STOREI8_MEMBASE_REG:
13264 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes. */
13266 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/call opcode onto its *_IMM form
 *   (second operand folded into an immediate).  Covers 32-bit (OP_I*),
 *   64-bit (OP_L*), compare, store-membase, and some x86/amd64-specific
 *   opcodes; the 64-bit remainder mapping is only valid when
 *   SIZEOF_REGISTER == 8.
 *
 * NOTE(review): extraction dropped most of the `case OP_xxx:` labels here;
 * only the return lines survive.  Do not infer the exact case->return
 * pairing from this view -- confirm against the upstream source.
 */
13273 mono_op_to_op_imm (int opcode)
13277 return OP_IADD_IMM;
13279 return OP_ISUB_IMM;
13281 return OP_IDIV_IMM;
13283 return OP_IDIV_UN_IMM;
13285 return OP_IREM_IMM;
13287 return OP_IREM_UN_IMM;
13289 return OP_IMUL_IMM;
13291 return OP_IAND_IMM;
13295 return OP_IXOR_IMM;
13297 return OP_ISHL_IMM;
13299 return OP_ISHR_IMM;
13301 return OP_ISHR_UN_IMM;
/* 64-bit (long) variants follow. */
13304 return OP_LADD_IMM;
13306 return OP_LSUB_IMM;
13308 return OP_LAND_IMM;
13312 return OP_LXOR_IMM;
13314 return OP_LSHL_IMM;
13316 return OP_LSHR_IMM;
13318 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64-bit registers. */
13319 #if SIZEOF_REGISTER == 8
13321 return OP_LREM_IMM;
13325 return OP_COMPARE_IMM;
13327 return OP_ICOMPARE_IMM;
13329 return OP_LCOMPARE_IMM;
13331 case OP_STORE_MEMBASE_REG:
13332 return OP_STORE_MEMBASE_IMM;
13333 case OP_STOREI1_MEMBASE_REG:
13334 return OP_STOREI1_MEMBASE_IMM;
13335 case OP_STOREI2_MEMBASE_REG:
13336 return OP_STOREI2_MEMBASE_IMM;
13337 case OP_STOREI4_MEMBASE_REG:
13338 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific immediate forms (x86/amd64 only). */
13340 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13342 return OP_X86_PUSH_IMM;
13343 case OP_X86_COMPARE_MEMBASE_REG:
13344 return OP_X86_COMPARE_MEMBASE_IMM;
13346 #if defined(TARGET_AMD64)
13347 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13348 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13350 case OP_VOIDCALL_REG:
13351 return OP_VOIDCALL;
13359 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL ldind.* (load-indirect) opcode onto the corresponding
 *   typed OP_LOAD*_MEMBASE IR opcode.  Both CEE_LDIND_I (native int)
 *   and CEE_LDIND_REF (object reference) map to the pointer-sized
 *   OP_LOAD_MEMBASE.  Unknown opcodes assert.
 *
 * NOTE(review): most `case CEE_LDIND_*:` labels were lost in extraction;
 * the visible return lines show the mapping order only.
 */
13366 ldind_to_load_membase (int opcode)
13370 return OP_LOADI1_MEMBASE;
13372 return OP_LOADU1_MEMBASE;
13374 return OP_LOADI2_MEMBASE;
13376 return OP_LOADU2_MEMBASE;
13378 return OP_LOADI4_MEMBASE;
13380 return OP_LOADU4_MEMBASE;
13382 return OP_LOAD_MEMBASE;
/* Object references are loaded as a pointer-sized value. */
13383 case CEE_LDIND_REF:
13384 return OP_LOAD_MEMBASE;
13386 return OP_LOADI8_MEMBASE;
13388 return OP_LOADR4_MEMBASE;
13390 return OP_LOADR8_MEMBASE;
13392 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL stind.* (store-indirect) opcode onto the corresponding
 *   typed OP_STORE*_MEMBASE_REG IR opcode.  CEE_STIND_REF maps to the
 *   pointer-sized OP_STORE_MEMBASE_REG.  Unknown opcodes assert.
 *
 * NOTE(review): most `case CEE_STIND_*:` labels are missing from this
 * extraction; only the return lines remain.
 */
13399 stind_to_store_membase (int opcode)
13403 return OP_STOREI1_MEMBASE_REG;
13405 return OP_STOREI2_MEMBASE_REG;
13407 return OP_STOREI4_MEMBASE_REG;
13409 case CEE_STIND_REF:
13410 return OP_STORE_MEMBASE_REG;
13412 return OP_STOREI8_MEMBASE_REG;
13414 return OP_STORER4_MEMBASE_REG;
13416 return OP_STORER8_MEMBASE_REG;
13418 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) onto an absolute-address
 *   load (OP_LOAD*_MEM).  Only provided for x86/amd64, which have
 *   addressing modes for this; 64-bit loads additionally require
 *   SIZEOF_REGISTER == 8.
 */
13425 mono_load_membase_to_load_mem (int opcode)
13427 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13428 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13430 case OP_LOAD_MEMBASE:
13431 return OP_LOAD_MEM;
13432 case OP_LOADU1_MEMBASE:
13433 return OP_LOADU1_MEM;
13434 case OP_LOADU2_MEMBASE:
13435 return OP_LOADU2_MEM;
13436 case OP_LOADI4_MEMBASE:
13437 return OP_LOADI4_MEM;
13438 case OP_LOADU4_MEMBASE:
13439 return OP_LOADU4_MEM;
13440 #if SIZEOF_REGISTER == 8
13441 case OP_LOADI8_MEMBASE:
13442 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds a store (store_opcode), return
 *   an x86/amd64 read-modify-write opcode that operates directly on the
 *   memory destination (e.g. add [mem], reg), fusing the load/op/store.
 *   Only store widths the target can RMW are accepted: 32-bit stores on
 *   x86, 32- and 64-bit stores on amd64.
 *
 * NOTE(review): the `case OP_xxx:` labels and fall-through returns were
 * dropped by extraction here; the *_REG vs *_IMM grouping is visible but
 * the exact case mapping must be checked upstream.
 */
13451 op_to_op_dest_membase (int store_opcode, int opcode)
13453 #if defined(TARGET_X86)
/* x86 can only RMW pointer-sized (== 32-bit) stores. */
13454 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13459 return OP_X86_ADD_MEMBASE_REG;
13461 return OP_X86_SUB_MEMBASE_REG;
13463 return OP_X86_AND_MEMBASE_REG;
13465 return OP_X86_OR_MEMBASE_REG;
13467 return OP_X86_XOR_MEMBASE_REG;
13470 return OP_X86_ADD_MEMBASE_IMM;
13473 return OP_X86_SUB_MEMBASE_IMM;
13476 return OP_X86_AND_MEMBASE_IMM;
13479 return OP_X86_OR_MEMBASE_IMM;
13482 return OP_X86_XOR_MEMBASE_IMM;
13488 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64-bit RMW stores. */
13489 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13494 return OP_X86_ADD_MEMBASE_REG;
13496 return OP_X86_SUB_MEMBASE_REG;
13498 return OP_X86_AND_MEMBASE_REG;
13500 return OP_X86_OR_MEMBASE_REG;
13502 return OP_X86_XOR_MEMBASE_REG;
13504 return OP_X86_ADD_MEMBASE_IMM;
13506 return OP_X86_SUB_MEMBASE_IMM;
13508 return OP_X86_AND_MEMBASE_IMM;
13510 return OP_X86_OR_MEMBASE_IMM;
13512 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (OP_AMD64_*) variants follow. */
13514 return OP_AMD64_ADD_MEMBASE_REG;
13516 return OP_AMD64_SUB_MEMBASE_REG;
13518 return OP_AMD64_AND_MEMBASE_REG;
13520 return OP_AMD64_OR_MEMBASE_REG;
13522 return OP_AMD64_XOR_MEMBASE_REG;
13525 return OP_AMD64_ADD_MEMBASE_IMM;
13528 return OP_AMD64_SUB_MEMBASE_IMM;
13531 return OP_AMD64_AND_MEMBASE_IMM;
13534 return OP_AMD64_OR_MEMBASE_IMM;
13537 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1-byte store into an
 *   x86/amd64 SETcc-to-memory opcode (setcc [mem]).  Only applies when
 *   the store is OP_STOREI1_MEMBASE_REG (setcc writes one byte).
 *
 * NOTE(review): the `case` labels selecting between SETEQ/SETNE were lost
 * in extraction; presumably they are OP_ICEQ/OP_ICNEQ-style opcodes --
 * confirm upstream.
 */
13547 op_to_op_store_membase (int store_opcode, int opcode)
13549 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13552 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13553 return OP_X86_SETEQ_MEMBASE;
13555 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13556 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (load_opcode) feeding the FIRST source operand of
 *   `opcode` into a single x86/amd64 opcode that reads that operand
 *   straight from memory (push [mem], cmp [mem], ...).  On amd64 the
 *   pointer-size checks distinguish native x64 from the ILP32 ABI via
 *   cfg->backend->ilp32.
 *
 * NOTE(review): several `case` labels and `#ifdef TARGET_X86` framing
 * lines are missing from this extraction; the pre-existing FIXME about
 * sign extension (unsigned byte loads fused into signed compares) is
 * still unresolved here.
 */
13564 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13567 /* FIXME: This has sign extension issues */
13569 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13570 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only 32-bit/pointer-sized loads can be folded on x86. */
13573 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13578 return OP_X86_PUSH_MEMBASE;
13579 case OP_COMPARE_IMM:
13580 case OP_ICOMPARE_IMM:
13581 return OP_X86_COMPARE_MEMBASE_IMM;
13584 return OP_X86_COMPARE_MEMBASE_REG;
13588 #ifdef TARGET_AMD64
13589 /* FIXME: This has sign extension issues */
13591 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13592 return OP_X86_COMPARE_MEMBASE8_IMM;
13597 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13598 return OP_X86_PUSH_MEMBASE;
/* The block below is intentionally commented out upstream: the fused
 * compare-immediate forms only accept 32-bit immediates. */
13600 /* FIXME: This only works for 32 bit immediates
13601 case OP_COMPARE_IMM:
13602 case OP_LCOMPARE_IMM:
13603 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13604 return OP_AMD64_COMPARE_MEMBASE_IMM;
13606 case OP_ICOMPARE_IMM:
13607 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13608 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13612 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13613 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13614 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13615 return OP_AMD64_COMPARE_MEMBASE_REG;
13618 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13619 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (load_opcode) feeding the SECOND source operand of
 *   `opcode` into a single reg-op-memory opcode (e.g. add reg, [mem]).
 *   On amd64, 32-bit loads (and pointer loads under the ILP32 ABI) take
 *   the OP_X86_* 32-bit forms; 64-bit loads take the OP_AMD64_* forms.
 *
 * NOTE(review): the x86-only branch framing and most `case` labels were
 * dropped by extraction; the visible return lines show only the mapping
 * order within each width class.
 */
13628 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13631 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13637 return OP_X86_COMPARE_REG_MEMBASE;
13639 return OP_X86_ADD_REG_MEMBASE;
13641 return OP_X86_SUB_REG_MEMBASE;
13643 return OP_X86_AND_REG_MEMBASE;
13645 return OP_X86_OR_REG_MEMBASE;
13647 return OP_X86_XOR_REG_MEMBASE;
13651 #ifdef TARGET_AMD64
/* 32-bit operand widths (including ILP32 pointer loads). */
13652 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13655 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13657 return OP_X86_ADD_REG_MEMBASE;
13659 return OP_X86_SUB_REG_MEMBASE;
13661 return OP_X86_AND_REG_MEMBASE;
13663 return OP_X86_OR_REG_MEMBASE;
13665 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operand widths (native LP64 pointer loads included). */
13667 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13671 return OP_AMD64_COMPARE_REG_MEMBASE;
13673 return OP_AMD64_ADD_REG_MEMBASE;
13675 return OP_AMD64_SUB_REG_MEMBASE;
13677 return OP_AMD64_AND_REG_MEMBASE;
13679 return OP_AMD64_OR_REG_MEMBASE;
13681 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that are
 *   software-emulated on the current target (long shifts on 32-bit
 *   registers, emulated mul/div/rem) -- folding an immediate into an
 *   emulated opcode would defeat the emulation path.
 *
 * NOTE(review): the `case` labels and early-return lines inside the #if
 * groups were lost in extraction; only the guards and the fallthrough to
 * mono_op_to_op_imm () are visible.
 */
13690 mono_op_to_op_imm_noemul (int opcode)
13693 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13699 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13706 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13711 return mono_op_to_op_imm (opcode);
13716 * mono_handle_global_vregs:
13718 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13722 mono_handle_global_vregs (MonoCompile *cfg)
13724 gint32 *vreg_to_bb;
13725 MonoBasicBlock *bb;
13728 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13730 #ifdef MONO_ARCH_SIMD_INTRINSICS
13731 if (cfg->uses_simd_intrinsics)
13732 mono_simd_simplify_indirection (cfg);
13735 /* Find local vregs used in more than one bb */
13736 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13737 MonoInst *ins = bb->code;
13738 int block_num = bb->block_num;
13740 if (cfg->verbose_level > 2)
13741 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13744 for (; ins; ins = ins->next) {
13745 const char *spec = INS_INFO (ins->opcode);
13746 int regtype = 0, regindex;
13749 if (G_UNLIKELY (cfg->verbose_level > 2))
13750 mono_print_ins (ins);
13752 g_assert (ins->opcode >= MONO_CEE_LAST);
13754 for (regindex = 0; regindex < 4; regindex ++) {
13757 if (regindex == 0) {
13758 regtype = spec [MONO_INST_DEST];
13759 if (regtype == ' ')
13762 } else if (regindex == 1) {
13763 regtype = spec [MONO_INST_SRC1];
13764 if (regtype == ' ')
13767 } else if (regindex == 2) {
13768 regtype = spec [MONO_INST_SRC2];
13769 if (regtype == ' ')
13772 } else if (regindex == 3) {
13773 regtype = spec [MONO_INST_SRC3];
13774 if (regtype == ' ')
13779 #if SIZEOF_REGISTER == 4
13780 /* In the LLVM case, the long opcodes are not decomposed */
13781 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13783 * Since some instructions reference the original long vreg,
13784 * and some reference the two component vregs, it is quite hard
13785 * to determine when it needs to be global. So be conservative.
13787 if (!get_vreg_to_inst (cfg, vreg)) {
13788 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13790 if (cfg->verbose_level > 2)
13791 printf ("LONG VREG R%d made global.\n", vreg);
13795 * Make the component vregs volatile since the optimizations can
13796 * get confused otherwise.
13798 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13799 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13803 g_assert (vreg != -1);
13805 prev_bb = vreg_to_bb [vreg];
13806 if (prev_bb == 0) {
13807 /* 0 is a valid block num */
13808 vreg_to_bb [vreg] = block_num + 1;
13809 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13810 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13813 if (!get_vreg_to_inst (cfg, vreg)) {
13814 if (G_UNLIKELY (cfg->verbose_level > 2))
13815 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13819 if (vreg_is_ref (cfg, vreg))
13820 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13822 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13825 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13828 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13831 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13834 g_assert_not_reached ();
13838 /* Flag as having been used in more than one bb */
13839 vreg_to_bb [vreg] = -1;
13845 /* If a variable is used in only one bblock, convert it into a local vreg */
13846 for (i = 0; i < cfg->num_varinfo; i++) {
13847 MonoInst *var = cfg->varinfo [i];
13848 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13850 switch (var->type) {
13856 #if SIZEOF_REGISTER == 8
13859 #if !defined(TARGET_X86)
13860 /* Enabling this screws up the fp stack on x86 */
13863 if (mono_arch_is_soft_float ())
13866 /* Arguments are implicitly global */
13867 /* Putting R4 vars into registers doesn't work currently */
13868 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13869 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13871 * Make that the variable's liveness interval doesn't contain a call, since
13872 * that would cause the lvreg to be spilled, making the whole optimization
13875 /* This is too slow for JIT compilation */
13877 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13879 int def_index, call_index, ins_index;
13880 gboolean spilled = FALSE;
13885 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13886 const char *spec = INS_INFO (ins->opcode);
13888 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13889 def_index = ins_index;
13891 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13892 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13893 if (call_index > def_index) {
13899 if (MONO_IS_CALL (ins))
13900 call_index = ins_index;
13910 if (G_UNLIKELY (cfg->verbose_level > 2))
13911 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13912 var->flags |= MONO_INST_IS_DEAD;
13913 cfg->vreg_to_inst [var->dreg] = NULL;
13920 * Compress the varinfo and vars tables so the liveness computation is faster and
13921 * takes up less space.
13924 for (i = 0; i < cfg->num_varinfo; ++i) {
13925 MonoInst *var = cfg->varinfo [i];
13926 if (pos < i && cfg->locals_start == i)
13927 cfg->locals_start = pos;
13928 if (!(var->flags & MONO_INST_IS_DEAD)) {
13930 cfg->varinfo [pos] = cfg->varinfo [i];
13931 cfg->varinfo [pos]->inst_c0 = pos;
13932 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13933 cfg->vars [pos].idx = pos;
13934 #if SIZEOF_REGISTER == 4
13935 if (cfg->varinfo [pos]->type == STACK_I8) {
13936 /* Modify the two component vars too */
13939 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13940 var1->inst_c0 = pos;
13941 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13942 var1->inst_c0 = pos;
13949 cfg->num_varinfo = pos;
13950 if (cfg->locals_start > cfg->num_varinfo)
13951 cfg->locals_start = cfg->num_varinfo;
13955 * mono_spill_global_vars:
13957 * Generate spill code for variables which are not allocated to registers,
13958 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13959 * code is generated which could be optimized by the local optimization passes.
 * Also emits OP_GC_LIVENESS_DEF/USE and OP_LIVERANGE_START/END marker opcodes
 * when the corresponding cfg flags (compute_gc_maps, compute_precise_live_ranges)
 * are set, so later passes/backends can produce GC maps and precise live ranges.
13962 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13964 MonoBasicBlock *bb;
13966 int orig_next_vreg;
13967 guint32 *vreg_to_lvreg;
13969 guint32 i, lvregs_len;
13970 gboolean dest_has_lvreg = FALSE;
13971 guint32 stacktypes [128];
13972 MonoInst **live_range_start, **live_range_end;
13973 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13974 int *gsharedvt_vreg_to_idx = NULL;
13976 *need_local_opts = FALSE;
 /* spec2 is a scratch instruction-spec buffer (declared elsewhere in this file);
  * it is filled in below when store-membase opcodes are temporarily rewritten. */
13978 memset (spec2, 0, sizeof (spec2));
13980 /* FIXME: Move this function to mini.c */
 /* Map the spec register-type characters ('i','l','f','x') to stack types,
  * used when allocating fresh dregs/sregs via alloc_dreg () below. */
13981 stacktypes ['i'] = STACK_PTR;
13982 stacktypes ['l'] = STACK_I8;
13983 stacktypes ['f'] = STACK_R8;
13984 #ifdef MONO_ARCH_SIMD_INTRINSICS
13985 stacktypes ['x'] = STACK_VTYPE;
13988 #if SIZEOF_REGISTER == 4
13989 /* Create MonoInsts for longs */
 /* On 32-bit targets a 64-bit vreg has component vregs at dreg+1 (ls word)
  * and dreg+2 (ms word); give each a stack slot derived from the parent's. */
13990 for (i = 0; i < cfg->num_varinfo; i++) {
13991 MonoInst *ins = cfg->varinfo [i];
13993 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13994 switch (ins->type) {
13999 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14002 g_assert (ins->opcode == OP_REGOFFSET);
14004 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
14006 tree->opcode = OP_REGOFFSET;
14007 tree->inst_basereg = ins->inst_basereg;
14008 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14010 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
14012 tree->opcode = OP_REGOFFSET;
14013 tree->inst_basereg = ins->inst_basereg;
14014 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14024 if (cfg->compute_gc_maps) {
14025 /* registers need liveness info even for non-ref vregs */
14026 for (i = 0; i < cfg->num_varinfo; i++) {
14027 MonoInst *ins = cfg->varinfo [i];
14029 if (ins->opcode == OP_REGVAR)
14030 ins->flags |= MONO_INST_GC_TRACK;
 /* For gsharedvt methods, map each variable-sized local to an index into the
  * runtime-info offsets table (stored +1 so 0 means "no entry"); args get -1
  * and are handled via OP_GSHAREDVT_ARG_REGOFFSET instead. */
14034 if (cfg->gsharedvt) {
14035 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14037 for (i = 0; i < cfg->num_varinfo; ++i) {
14038 MonoInst *ins = cfg->varinfo [i];
14041 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14042 if (i >= cfg->locals_start) {
14044 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14045 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14046 ins->opcode = OP_GSHAREDVT_LOCAL;
14047 ins->inst_imm = idx;
14050 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14051 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14057 /* FIXME: widening and truncation */
14060 * As an optimization, when a variable allocated to the stack is first loaded into
14061 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14062 * the variable again.
 /* lvregs[] records which vreg_to_lvreg[] entries are live so the cache can be
  * cleared cheaply at bblock boundaries and calls; capacity is 1024 entries
  * (see the g_assert (lvregs_len < 1024) checks below). */
14064 orig_next_vreg = cfg->next_vreg;
14065 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14066 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14070 * These arrays contain the first and last instructions accessing a given
14072 * Since we emit bblocks in the same order we process them here, and we
14073 * don't split live ranges, these will precisely describe the live range of
14074 * the variable, i.e. the instruction range where a valid value can be found
14075 * in the variables location.
14076 * The live range is computed using the liveness info computed by the liveness pass.
14077 * We can't use vmv->range, since that is an abstract live range, and we need
14078 * one which is instruction precise.
14079 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14081 /* FIXME: Only do this if debugging info is requested */
14082 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14083 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14084 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14085 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14087 /* Add spill loads/stores */
14088 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14091 if (cfg->verbose_level > 2)
14092 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14094 /* Clear vreg_to_lvreg array */
 /* lvregs only survive within one bblock */
14095 for (i = 0; i < lvregs_len; i++)
14096 vreg_to_lvreg [lvregs [i]] = 0;
14100 MONO_BB_FOR_EACH_INS (bb, ins) {
14101 const char *spec = INS_INFO (ins->opcode);
14102 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14103 gboolean store, no_lvreg;
14104 int sregs [MONO_MAX_SRC_REGS];
14106 if (G_UNLIKELY (cfg->verbose_level > 2))
14107 mono_print_ins (ins);
14109 if (ins->opcode == OP_NOP)
14113 * We handle LDADDR here as well, since it can only be decomposed
14114 * when variable addresses are known.
14116 if (ins->opcode == OP_LDADDR) {
14117 MonoInst *var = ins->inst_p0;
14119 if (var->opcode == OP_VTARG_ADDR) {
14120 /* Happens on SPARC/S390 where vtypes are passed by reference */
14121 MonoInst *vtaddr = var->inst_left;
14122 if (vtaddr->opcode == OP_REGVAR) {
14123 ins->opcode = OP_MOVE;
14124 ins->sreg1 = vtaddr->dreg;
14126 else if (var->inst_left->opcode == OP_REGOFFSET) {
14127 ins->opcode = OP_LOAD_MEMBASE;
14128 ins->inst_basereg = vtaddr->inst_basereg;
14129 ins->inst_offset = vtaddr->inst_offset;
14132 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14133 /* gsharedvt arg passed by ref */
14134 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14136 ins->opcode = OP_LOAD_MEMBASE;
14137 ins->inst_basereg = var->inst_basereg;
14138 ins->inst_offset = var->inst_offset;
14139 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14140 MonoInst *load, *load2, *load3;
14141 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14142 int reg1, reg2, reg3;
14143 MonoInst *info_var = cfg->gsharedvt_info_var;
14144 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14148 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14151 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14153 g_assert (info_var);
14154 g_assert (locals_var);
14156 /* Mark the instruction used to compute the locals var as used */
14157 cfg->gsharedvt_locals_var_ins = NULL;
14159 /* Load the offset */
14160 if (info_var->opcode == OP_REGOFFSET) {
14161 reg1 = alloc_ireg (cfg);
14162 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14163 } else if (info_var->opcode == OP_REGVAR) {
14165 reg1 = info_var->dreg;
14167 g_assert_not_reached ();
14169 reg2 = alloc_ireg (cfg);
14170 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14171 /* Load the locals area address */
14172 reg3 = alloc_ireg (cfg);
14173 if (locals_var->opcode == OP_REGOFFSET) {
14174 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14175 } else if (locals_var->opcode == OP_REGVAR) {
14176 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14178 g_assert_not_reached ();
14180 /* Compute the address */
14181 ins->opcode = OP_PADD;
14185 mono_bblock_insert_before_ins (bb, ins, load3);
14186 mono_bblock_insert_before_ins (bb, load3, load2);
14188 mono_bblock_insert_before_ins (bb, load2, load);
14190 g_assert (var->opcode == OP_REGOFFSET);
14192 ins->opcode = OP_ADD_IMM;
14193 ins->sreg1 = var->inst_basereg;
14194 ins->inst_imm = var->inst_offset;
14197 *need_local_opts = TRUE;
14198 spec = INS_INFO (ins->opcode);
14201 if (ins->opcode < MONO_CEE_LAST) {
14202 mono_print_ins (ins);
14203 g_assert_not_reached ();
14207 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 /* ... input, so swap dreg/sreg2 and use spec2 so this pass treats the base
  * register as a source; the swap is undone further below. */
14211 if (MONO_IS_STORE_MEMBASE (ins)) {
14212 tmp_reg = ins->dreg;
14213 ins->dreg = ins->sreg2;
14214 ins->sreg2 = tmp_reg;
14217 spec2 [MONO_INST_DEST] = ' ';
14218 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14219 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14220 spec2 [MONO_INST_SRC3] = ' ';
14222 } else if (MONO_IS_STORE_MEMINDEX (ins))
14223 g_assert_not_reached ();
14228 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14229 printf ("\t %.3s %d", spec, ins->dreg);
14230 num_sregs = mono_inst_get_src_registers (ins, sregs);
14231 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14232 printf (" %d", sregs [srcindex]);
 /* DREG: if the instruction defines a global vreg, either rename it to the
  * allocated hreg (OP_REGVAR), fuse the store into the instruction itself,
  * or emit an explicit store to the variable's stack slot. */
14239 regtype = spec [MONO_INST_DEST];
14240 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14243 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14244 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14245 MonoInst *store_ins;
14247 MonoInst *def_ins = ins;
14248 int dreg = ins->dreg; /* The original vreg */
14250 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14252 if (var->opcode == OP_REGVAR) {
14253 ins->dreg = var->dreg;
14254 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14256 * Instead of emitting a load+store, use a _membase opcode.
14258 g_assert (var->opcode == OP_REGOFFSET);
14259 if (ins->opcode == OP_MOVE) {
14263 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14264 ins->inst_basereg = var->inst_basereg;
14265 ins->inst_offset = var->inst_offset;
14268 spec = INS_INFO (ins->opcode);
14272 g_assert (var->opcode == OP_REGOFFSET);
14274 prev_dreg = ins->dreg;
14276 /* Invalidate any previous lvreg for this vreg */
14277 vreg_to_lvreg [ins->dreg] = 0;
14281 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14283 store_opcode = OP_STOREI8_MEMBASE_REG;
14286 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14288 #if SIZEOF_REGISTER != 8
14289 if (regtype == 'l') {
14290 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14291 mono_bblock_insert_after_ins (bb, ins, store_ins);
14292 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14293 mono_bblock_insert_after_ins (bb, ins, store_ins);
14294 def_ins = store_ins;
14299 g_assert (store_opcode != OP_STOREV_MEMBASE);
14301 /* Try to fuse the store into the instruction itself */
14302 /* FIXME: Add more instructions */
14303 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14304 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14305 ins->inst_imm = ins->inst_c0;
14306 ins->inst_destbasereg = var->inst_basereg;
14307 ins->inst_offset = var->inst_offset;
14308 spec = INS_INFO (ins->opcode);
14309 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14310 ins->opcode = store_opcode;
14311 ins->inst_destbasereg = var->inst_basereg;
14312 ins->inst_offset = var->inst_offset;
14316 tmp_reg = ins->dreg;
14317 ins->dreg = ins->sreg2;
14318 ins->sreg2 = tmp_reg;
14321 spec2 [MONO_INST_DEST] = ' ';
14322 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14323 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14324 spec2 [MONO_INST_SRC3] = ' ';
14326 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14327 // FIXME: The backends expect the base reg to be in inst_basereg
14328 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14330 ins->inst_basereg = var->inst_basereg;
14331 ins->inst_offset = var->inst_offset;
14332 spec = INS_INFO (ins->opcode);
14334 /* printf ("INS: "); mono_print_ins (ins); */
14335 /* Create a store instruction */
14336 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14338 /* Insert it after the instruction */
14339 mono_bblock_insert_after_ins (bb, ins, store_ins);
14341 def_ins = store_ins;
14344 * We can't assign ins->dreg to var->dreg here, since the
14345 * sregs could use it. So set a flag, and do it after
14348 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14349 dest_has_lvreg = TRUE;
14354 if (def_ins && !live_range_start [dreg]) {
14355 live_range_start [dreg] = def_ins;
14356 live_range_start_bb [dreg] = bb;
14359 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14362 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14363 tmp->inst_c1 = dreg;
14364 mono_bblock_insert_after_ins (bb, def_ins, tmp);
 /* SREGS: rewrite each source register similarly, reusing a cached lvreg or
  * emitting a load from the variable's stack slot when it is not in a register. */
14371 num_sregs = mono_inst_get_src_registers (ins, sregs);
14372 for (srcindex = 0; srcindex < 3; ++srcindex) {
14373 regtype = spec [MONO_INST_SRC1 + srcindex];
14374 sreg = sregs [srcindex];
14376 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14377 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14378 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14379 MonoInst *use_ins = ins;
14380 MonoInst *load_ins;
14381 guint32 load_opcode;
14383 if (var->opcode == OP_REGVAR) {
14384 sregs [srcindex] = var->dreg;
14385 //mono_inst_set_src_registers (ins, sregs);
14386 live_range_end [sreg] = use_ins;
14387 live_range_end_bb [sreg] = bb;
14389 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14392 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14393 /* var->dreg is a hreg */
14394 tmp->inst_c1 = sreg;
14395 mono_bblock_insert_after_ins (bb, ins, tmp);
14401 g_assert (var->opcode == OP_REGOFFSET);
14403 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14405 g_assert (load_opcode != OP_LOADV_MEMBASE);
14407 if (vreg_to_lvreg [sreg]) {
14408 g_assert (vreg_to_lvreg [sreg] != -1);
14410 /* The variable is already loaded to an lvreg */
14411 if (G_UNLIKELY (cfg->verbose_level > 2))
14412 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14413 sregs [srcindex] = vreg_to_lvreg [sreg];
14414 //mono_inst_set_src_registers (ins, sregs);
14418 /* Try to fuse the load into the instruction */
14419 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14420 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14421 sregs [0] = var->inst_basereg;
14422 //mono_inst_set_src_registers (ins, sregs);
14423 ins->inst_offset = var->inst_offset;
14424 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14425 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14426 sregs [1] = var->inst_basereg;
14427 //mono_inst_set_src_registers (ins, sregs);
14428 ins->inst_offset = var->inst_offset;
14430 if (MONO_IS_REAL_MOVE (ins)) {
14431 ins->opcode = OP_NOP;
14434 //printf ("%d ", srcindex); mono_print_ins (ins);
14436 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14438 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14439 if (var->dreg == prev_dreg) {
14441 * sreg refers to the value loaded by the load
14442 * emitted below, but we need to use ins->dreg
14443 * since it refers to the store emitted earlier.
14447 g_assert (sreg != -1);
14448 vreg_to_lvreg [var->dreg] = sreg;
14449 g_assert (lvregs_len < 1024);
14450 lvregs [lvregs_len ++] = var->dreg;
14454 sregs [srcindex] = sreg;
14455 //mono_inst_set_src_registers (ins, sregs);
14457 #if SIZEOF_REGISTER != 8
14458 if (regtype == 'l') {
14459 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14460 mono_bblock_insert_before_ins (bb, ins, load_ins);
14461 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14462 mono_bblock_insert_before_ins (bb, ins, load_ins);
14463 use_ins = load_ins;
14468 #if SIZEOF_REGISTER == 4
14469 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14471 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14472 mono_bblock_insert_before_ins (bb, ins, load_ins);
14473 use_ins = load_ins;
14477 if (var->dreg < orig_next_vreg) {
14478 live_range_end [var->dreg] = use_ins;
14479 live_range_end_bb [var->dreg] = bb;
14482 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14485 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14486 tmp->inst_c1 = var->dreg;
14487 mono_bblock_insert_after_ins (bb, ins, tmp);
14491 mono_inst_set_src_registers (ins, sregs);
 /* Now that the sregs have been processed, it is safe to cache the dreg's lvreg. */
14493 if (dest_has_lvreg) {
14494 g_assert (ins->dreg != -1);
14495 vreg_to_lvreg [prev_dreg] = ins->dreg;
14496 g_assert (lvregs_len < 1024);
14497 lvregs [lvregs_len ++] = prev_dreg;
14498 dest_has_lvreg = FALSE;
 /* Undo the dreg/sreg2 swap done for store-membase opcodes above. */
14502 tmp_reg = ins->dreg;
14503 ins->dreg = ins->sreg2;
14504 ins->sreg2 = tmp_reg;
 /* Calls clobber registers, so the cached lvregs are no longer valid. */
14507 if (MONO_IS_CALL (ins)) {
14508 /* Clear vreg_to_lvreg array */
14509 for (i = 0; i < lvregs_len; i++)
14510 vreg_to_lvreg [lvregs [i]] = 0;
14512 } else if (ins->opcode == OP_NOP) {
14514 MONO_INST_NULLIFY_SREGS (ins);
14517 if (cfg->verbose_level > 2)
14518 mono_print_ins_index (1, ins);
14521 /* Extend the live range based on the liveness info */
14522 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14523 for (i = 0; i < cfg->num_varinfo; i ++) {
14524 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14526 if (vreg_is_volatile (cfg, vi->vreg))
14527 /* The liveness info is incomplete */
14530 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14531 /* Live from at least the first ins of this bb */
14532 live_range_start [vi->vreg] = bb->code;
14533 live_range_start_bb [vi->vreg] = bb;
14536 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14537 /* Live at least until the last ins of this bb */
14538 live_range_end [vi->vreg] = bb->last_ins;
14539 live_range_end_bb [vi->vreg] = bb;
14546 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14547 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14549 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14550 for (i = 0; i < cfg->num_varinfo; ++i) {
14551 int vreg = MONO_VARINFO (cfg, i)->vreg;
14554 if (live_range_start [vreg]) {
14555 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14557 ins->inst_c1 = vreg;
14558 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14560 if (live_range_end [vreg]) {
14561 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14563 ins->inst_c1 = vreg;
14564 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14565 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14567 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14572 if (cfg->gsharedvt_locals_var_ins) {
14573 /* Nullify if unused */
14574 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14575 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14578 g_free (live_range_start);
14579 g_free (live_range_end);
14580 g_free (live_range_start_bb);
14581 g_free (live_range_end_bb);
14586 * - use 'iadd' instead of 'int_add'
14587 * - handling ovf opcodes: decompose in method_to_ir.
14588 * - unify iregs/fregs
14589 * -> partly done, the missing parts are:
14590 * - a more complete unification would involve unifying the hregs as well, so
14591 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14592 * would no longer map to the machine hregs, so the code generators would need to
14593 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14594 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14595 * fp/non-fp branches speeds it up by about 15%.
14596 * - use sext/zext opcodes instead of shifts
14598 * - get rid of TEMPLOADs if possible and use vregs instead
14599 * - clean up usage of OP_P/OP_ opcodes
14600 * - cleanup usage of DUMMY_USE
14601 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14603 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14604 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14605 * - make sure handle_stack_args () is called before the branch is emitted
14606 * - when the new IR is done, get rid of all unused stuff
14607 * - COMPARE/BEQ as separate instructions or unify them ?
14608 * - keeping them separate allows specialized compare instructions like
14609 * compare_imm, compare_membase
14610 * - most back ends unify fp compare+branch, fp compare+ceq
14611 * - integrate mono_save_args into inline_method
14612 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14613 * - handle long shift opts on 32 bit platforms somehow: they require
14614 * 3 sregs (2 for arg1 and 1 for arg2)
14615 * - make byref a 'normal' type.
14616 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14617 * variable if needed.
14618 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14619 * like inline_method.
14620 * - remove inlining restrictions
14621 * - fix LNEG and enable cfold of INEG
14622 * - generalize x86 optimizations like ldelema as a peephole optimization
14623 * - add store_mem_imm for amd64
14624 * - optimize the loading of the interruption flag in the managed->native wrappers
14625 * - avoid special handling of OP_NOP in passes
14626 * - move code inserting instructions into one function/macro.
14627 * - try a coalescing phase after liveness analysis
14628 * - add float -> vreg conversion + local optimizations on !x86
14629 * - figure out how to handle decomposed branches during optimizations, ie.
14630 * compare+branch, op_jump_table+op_br etc.
14631 * - promote RuntimeXHandles to vregs
14632 * - vtype cleanups:
14633 * - add a NEW_VARLOADA_VREG macro
14634 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14635 * accessing vtype fields.
14636 * - get rid of I8CONST on 64 bit platforms
14637 * - dealing with the increase in code size due to branches created during opcode
14639 * - use extended basic blocks
14640 * - all parts of the JIT
14641 * - handle_global_vregs () && local regalloc
14642 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14643 * - sources of increase in code size:
14646 * - isinst and castclass
14647 * - lvregs not allocated to global registers even if used multiple times
14648 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14650 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14651 * - add all micro optimizations from the old JIT
14652 * - put tree optimizations into the deadce pass
14653 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14654 * specific function.
14655 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14656 * fcompare + branchCC.
14657 * - create a helper function for allocating a stack slot, taking into account
14658 * MONO_CFG_HAS_SPILLUP.
14660 * - merge the ia64 switch changes.
14661 * - optimize mono_regstate2_alloc_int/float.
14662 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14663 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14664 * parts of the tree could be separated by other instructions, killing the tree
14665 * arguments, or stores killing loads etc. Also, should we fold loads into other
14666 * instructions if the result of the load is used multiple times ?
14667 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14668 * - LAST MERGE: 108395.
14669 * - when returning vtypes in registers, generate IR and append it to the end of the
14670 * last bb instead of doing it in the epilog.
14671 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14679 - When to decompose opcodes:
14680 - earlier: this makes some optimizations hard to implement, since the low level IR
14681 no longer contains the necessary information. But it is easier to do.
14682 - later: harder to implement, enables more optimizations.
14683 - Branches inside bblocks:
14684 - created when decomposing complex opcodes.
14685 - branches to another bblock: harmless, but not tracked by the branch
14686 optimizations, so need to branch to a label at the start of the bblock.
14687 - branches to inside the same bblock: very problematic, trips up the local
14688 reg allocator. Can be fixed by splitting the current bblock, but that is a
14689 complex operation, since some local vregs can become global vregs etc.
14690 - Local/global vregs:
14691 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14692 local register allocator.
14693 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14694 structure, created by mono_create_var (). Assigned to hregs or the stack by
14695 the global register allocator.
14696 - When to do optimizations like alu->alu_imm:
14697 - earlier -> saves work later on since the IR will be smaller/simpler
14698 - later -> can work on more instructions
14699 - Handling of valuetypes:
14700 - When a vtype is pushed on the stack, a new temporary is created, an
14701 instruction computing its address (LDADDR) is emitted and pushed on
14702 the stack. Need to optimize cases when the vtype is used immediately as in
14703 argument passing, stloc etc.
14704 - Instead of the to_end stuff in the old JIT, simply call the function handling
14705 the values on the stack before emitting the last instruction of the bb.
14708 #endif /* DISABLE_JIT */