2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Heuristic weights used by the inliner's cost model. */
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current callee and jump to the method's bailout label. */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
/* Bail out if an earlier step already recorded an exception on the cfg. */
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
/* Record a method-access violation and bail out of IR generation. */
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
/* Record a field-access violation and bail out of IR generation. */
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
/* Abandon generic sharing for this method; __FILE__/__LINE__ locate the cause. */
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
/* Same, but only when compiling in gsharedvt mode. */
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
/* Flag an out-of-memory condition on the cfg and bail out. */
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
/* Disable AOT compilation for this method; logged when verbose. */
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
/* Treat the current token as unloadable: trip the debugger hook, set TYPE_LOAD. */
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
/* As LOAD_ERROR, but remembers the offending class in cfg->exception_ptr. */
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
/* Convert a pending MonoError on the cfg into a compilation bailout. */
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: CIL indirect load/store opcode -> membase opcode mapping. */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 /* helper methods signatures */
/* Lazily-built icall signatures shared across compilations (see mono_create_helper_signatures). */
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 * Instruction metadata
/* Expand mini-ops.h into per-opcode dreg/sreg kind characters. */
161 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
162 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
168 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
173 /* keep in sync with the enum in mini.h */
176 #include "mini-ops.h"
/* Re-expand mini-ops.h, this time counting source registers per opcode. */
181 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
182 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
184 * This should contain the index of the last sreg + 1. This is not the same
185 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
187 const gint8 ins_sreg_counts[] = {
188 #include "mini-ops.h"
/* Initialize liveness-range bookkeeping for a MonoMethodVar. */
193 #define MONO_INIT_VARINFO(vi,id) do { \
194 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers over the inline vreg allocators in mini.h. */
/* Allocate a fresh integer virtual register. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
/* Allocate a fresh long (64-bit) virtual register. */
206 mono_alloc_lreg (MonoCompile *cfg)
208 return alloc_lreg (cfg);
/* Allocate a fresh floating-point virtual register. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized virtual register. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a destination register matching the given eval-stack type. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed pointer / plain int). */
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Map a MonoType to the move opcode used to copy a value of that type
 *   between virtual registers (OP_MOVE / OP_LMOVE / OP_FMOVE / OP_RMOVE / ...).
 *   NOTE(review): several case labels are elided in this view; verify against
 *   the full source before relying on the exact case coverage.
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_get_underlying_type (type);
275 switch (type->type) {
288 case MONO_TYPE_FNPTR:
290 case MONO_TYPE_CLASS:
291 case MONO_TYPE_STRING:
292 case MONO_TYPE_OBJECT:
293 case MONO_TYPE_SZARRAY:
294 case MONO_TYPE_ARRAY:
298 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values live in their own register class, otherwise promoted to R8. */
304 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
307 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
308 if (type->data.klass->enumtype) {
309 type = mono_class_enum_basetype (type->data.klass);
312 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
315 case MONO_TYPE_TYPEDBYREF:
/* Generic instances reduce to their container class's byval type. */
317 case MONO_TYPE_GENERICINST:
318 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only appear under generic sharing. */
322 g_assert (cfg->gshared);
323 if (mini_type_var_is_vt (type))
326 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
328 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and instruction list to stdout. */
334 mono_print_bb (MonoBasicBlock *bb, const char *msg)
339 printf ("\n%s %d: [IN: ", msg, bb->block_num);
340 for (i = 0; i < bb->in_count; ++i)
341 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
343 for (i = 0; i < bb->out_count; ++i)
344 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
346 for (tree = bb->code; tree; tree = tree->next)
347 mono_print_ins_index (-1, tree);
/* One-time construction of the shared helper icall signatures declared above. */
351 mono_create_helper_signatures (void)
353 helper_sig_domain_get = mono_create_icall_signature ("ptr");
354 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
/* Debugger hook: trap here when --break-on-unverified is enabled. */
357 static MONO_NEVER_INLINE void
358 break_on_unverified (void)
360 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *   Mark the compilation as failed with METHOD_ACCESS and build the
 *   human-readable message; cfg takes ownership of exception_message.
 */
364 static MONO_NEVER_INLINE void
365 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
367 char *method_fname = mono_method_full_name (method, TRUE);
368 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
370 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
371 g_free (method_fname);
372 g_free (cil_method_fname);
/* Same pattern for an inaccessible field: set FIELD_ACCESS + message. */
375 static MONO_NEVER_INLINE void
376 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *field_fname = mono_field_full_name (field);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
381 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
382 g_free (method_fname);
383 g_free (field_fname);
/* Record that inlining the current callee failed (see INLINE_FAILURE macro). */
386 static MONO_NEVER_INLINE void
387 inline_failure (MonoCompile *cfg, const char *msg)
389 if (cfg->verbose_level >= 2)
390 printf ("inline failed: %s\n", msg);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *   Record that gsharedvt compilation cannot handle OPCODE at FILE:LINE;
 *   stores a diagnostic message on the cfg and flags
 *   MONO_EXCEPTION_GENERIC_SHARING_FAILED so a concrete instantiation is
 *   compiled instead.
 */
402 static MONO_NEVER_INLINE void
403 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
406 if (cfg->verbose_level >= 2)
407 printf ("%s\n", cfg->exception_message);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
412 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
413 * foo<T> (int i) { ldarg.0; box T; }
/* Handle unverifiable IL: under gsharedvt fall back to per-instantiation
 * compilation; otherwise trip the debugger hook and (elided here) mark the
 * method unverifiable. */
415 #define UNVERIFIED do { \
416 if (cfg->gsharedvt) { \
417 if (cfg->verbose_level > 2) \
418 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
420 goto exception_exit; \
422 break_on_unverified (); \
/* Fetch (or lazily create and register) the basic block starting at IL offset ip. */
426 #define GET_BBLOCK(cfg,tblock,ip) do { \
427 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
430 NEW_BBLOCK (cfg, (tblock)); \
431 (tblock)->cil_code = (ip); \
432 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA computing sr1 + (sr2 << shift) + imm into a
 * fresh managed-pointer register. */
436 #if defined(TARGET_X86) || defined(TARGET_AMD64)
437 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
438 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
439 (dest)->dreg = alloc_ireg_mp ((cfg)); \
440 (dest)->sreg1 = (sr1); \
441 (dest)->sreg2 = (sr2); \
442 (dest)->inst_imm = (imm); \
443 (dest)->backend.shift_amount = (shift); \
444 MONO_ADD_INS ((cfg)->cbb, (dest)); \
448 /* Emit conversions so both operands of a binary opcode are of the same type */
450 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
452 MonoInst *arg1 = *arg1_ref;
453 MonoInst *arg2 = *arg2_ref;
/* Case 1: one operand is R4 and the other R8 — promote the R4 side to R8. */
456 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
457 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
460 /* Mixing r4/r8 is allowed by the spec */
461 if (arg1->type == STACK_R4) {
462 int dreg = alloc_freg (cfg);
464 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
465 conv->type = STACK_R8;
469 if (arg2->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
473 conv->type = STACK_R8;
/* Case 2 (64-bit only): sign-extend an I4 operand mixed with a pointer. */
479 #if SIZEOF_REGISTER == 8
480 /* FIXME: Need to add many more cases */
481 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
484 int dr = alloc_preg (cfg);
485 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
486 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack entries, emit the (type-specialized) binary op, push result.
 * NOTE(review): sp is the eval-stack pointer of method_to_ir (); these macros
 * assume the operands are already at sp[0]/sp[1]. */
491 #define ADD_BINOP(op) do { \
492 MONO_INST_NEW (cfg, ins, (op)); \
494 ins->sreg1 = sp [0]->dreg; \
495 ins->sreg2 = sp [1]->dreg; \
496 type_from_op (cfg, ins, sp [0], sp [1]); \
498 /* Have to insert a widening op */ \
499 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
500 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
501 MONO_ADD_INS ((cfg)->cbb, (ins)); \
502 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one entry, emit the type-specialized unary op, push result. */
505 #define ADD_UNOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 type_from_op (cfg, ins, sp [0], NULL); \
511 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
512 MONO_ADD_INS ((cfg)->cbb, (ins)); \
513 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring the true edge to the
 * branch target and the false edge to next_block (or the fall-through BB). */
516 #define ADD_BINCOND(next_block) do { \
519 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
520 cmp->sreg1 = sp [0]->dreg; \
521 cmp->sreg2 = sp [1]->dreg; \
522 type_from_op (cfg, cmp, sp [0], sp [1]); \
524 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
525 type_from_op (cfg, ins, sp [0], sp [1]); \
526 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
527 GET_BBLOCK (cfg, tblock, target); \
528 link_bblock (cfg, cfg->cbb, tblock); \
529 ins->inst_true_bb = tblock; \
530 if ((next_block)) { \
531 link_bblock (cfg, cfg->cbb, (next_block)); \
532 ins->inst_false_bb = (next_block); \
533 start_new_bblock = 1; \
535 GET_BBLOCK (cfg, tblock, ip); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_false_bb = tblock; \
538 start_new_bblock = 2; \
/* Spill any leftover eval-stack entries before leaving the block. */
540 if (sp != stack_start) { \
541 handle_stack_args (cfg, stack_start, sp - stack_start); \
542 CHECK_UNVERIFIABLE (cfg); \
544 MONO_ADD_INS (cfg->cbb, cmp); \
545 MONO_ADD_INS (cfg->cbb, ins); \
549 * link_bblock: Links two basic blocks
551 * links two basic blocks in the control flow graph, the 'from'
552 * argument is the starting block and the 'to' argument is the block
553 * the control flow ends to after 'from'.
556 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
558 MonoBasicBlock **newa;
/* Verbose tracing of the new edge.
 * NOTE(review): the first printf says "IL%04x" where the others say
 * "IL_%04x" — looks like a typo in the trace message; confirm upstream
 * before changing the output format. */
562 if (from->cil_code) {
564 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
569 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from's out-list. */
576 for (i = 0; i < from->out_count; ++i) {
577 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are copy-on-grow, never freed). */
583 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
584 for (i = 0; i < from->out_count; ++i) {
585 newa [i] = from->out_bb [i];
/* Symmetric update of to's in-list. */
593 for (i = 0; i < to->in_count; ++i) {
594 if (from == to->in_bb [i]) {
600 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
601 for (i = 0; i < to->in_count; ++i) {
602 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock (). */
611 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
613 link_bblock (cfg, from, to);
617 * mono_find_block_region:
619 * We mark each basic block with a region ID. We use that to avoid BB
620 * optimizations when blocks are in different regions.
623 * A region token that encodes where this region is, and information
624 * about the clause owner for this block.
626 * The region encodes the try/catch/filter clause that owns this block
627 * as well as the type. -1 is a special value that represents a block
628 * that is in none of try/catch/filter.
631 mono_find_block_region (MonoCompile *cfg, int offset)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
/* First pass: is the offset inside a handler (filter/finally/fault/catch)?
 * Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
637 for (i = 0; i < header->num_clauses; ++i) {
638 clause = &header->clauses [i];
639 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
640 (offset < (clause->handler_offset)))
641 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
643 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
644 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
645 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
646 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
647 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: otherwise, is it inside a protected (try) range? */
652 for (i = 0; i < header->num_clauses; ++i) {
653 clause = &header->clauses [i];
655 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
656 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (as a GList) every clause of kind TYPE whose protected range
 *   contains ip but not target — i.e. the handlers a branch from ip to
 *   target would leave and therefore must execute on the way out.
 */
663 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
665 MonoMethodHeader *header = cfg->header;
666 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
673 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
674 if (clause->flags == type)
675 res = g_list_append (res, clause);
/* Get or create the stack-pointer save variable for an EH region. */
682 mono_create_spvar_for_region (MonoCompile *cfg, int region)
686 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
690 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler offset. */
698 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
700 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler offset. */
704 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
708 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
712 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
713 /* prevent it from being register allocated */
714 var->flags |= MONO_INST_VOLATILE;
716 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
722 * Returns the type used in the eval stack when @type is loaded.
723 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
726 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
730 type = mini_get_underlying_type (type);
731 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
733 inst->type = STACK_MP;
738 switch (type->type) {
740 inst->type = STACK_INV;
/* All small integer types widen to I4 on the eval stack (ECMA-335). */
748 inst->type = STACK_I4;
753 case MONO_TYPE_FNPTR:
754 inst->type = STACK_PTR;
756 case MONO_TYPE_CLASS:
757 case MONO_TYPE_STRING:
758 case MONO_TYPE_OBJECT:
759 case MONO_TYPE_SZARRAY:
760 case MONO_TYPE_ARRAY:
761 inst->type = STACK_OBJ;
765 inst->type = STACK_I8;
/* R4 stack kind depends on whether this cfg keeps r4 distinct from r8. */
768 inst->type = cfg->r4_stack_type;
771 inst->type = STACK_R8;
773 case MONO_TYPE_VALUETYPE:
/* Enums behave as their underlying integral type. */
774 if (type->data.klass->enumtype) {
775 type = mono_class_enum_basetype (type->data.klass);
779 inst->type = STACK_VTYPE;
782 case MONO_TYPE_TYPEDBYREF:
783 inst->klass = mono_defaults.typed_reference_class;
784 inst->type = STACK_VTYPE;
786 case MONO_TYPE_GENERICINST:
787 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only reachable under generic sharing. */
791 g_assert (cfg->gshared);
792 if (mini_is_gsharedvt_type (type)) {
793 g_assert (cfg->gsharedvt);
794 inst->type = STACK_VTYPE;
796 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
800 g_error ("unknown type 0x%02x in eval stack type", type->type);
805 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop, indexed [lhs stack type][rhs stack type].
 * NOTE(review): some rows carry a 9th (R4) column in this view while others
 * show 8 — the elided STACK_MAX/row widths should be confirmed upstream. */
808 bin_num_table [STACK_MAX] [STACK_MAX] = {
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
814 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand stack type. */
822 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
825 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor, div/rem variants). */
827 bin_int_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, non-zero values encode which
 * comparison kinds (eq-only vs. ordered) are permitted between the two types. */
839 bin_comp_table [STACK_MAX] [STACK_MAX] = {
840 /* Inv i L p F & O vt r4 */
842 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
843 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
844 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
845 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
846 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
847 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
848 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
849 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
852 /* reduce the size of this table */
/* Result type of shl/shr/shr.un: value type indexed by [value][shift amount]. */
854 shift_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
866 * Tables to map from the non-specific opcode to the matching
867 * type-specific opcode.
/* Each entry is an opcode DELTA added to the generic CIL opcode; indexed by
 * the operand's eval-stack type. A 0 entry means "no specialization". */
869 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
871 binops_op_map [STACK_MAX] = {
872 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
875 /* handles from CEE_NEG to CEE_CONV_U8 */
877 unops_op_map [STACK_MAX] = {
878 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
881 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
883 ovfops_op_map [STACK_MAX] = {
884 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
887 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
889 ovf2ops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
893 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
895 ovf3ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
899 /* handles from CEE_BEQ to CEE_BLT_UN */
901 beqops_op_map [STACK_MAX] = {
902 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
905 /* handles from CEE_CEQ to CEE_CLT_UN */
907 ceqops_op_map [STACK_MAX] = {
908 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
912 * Sets ins->type (the type on the eval stack) according to the
913 * type of the opcode and the arguments to it.
914 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
916 * FIXME: this function sets ins->type unconditionally in some cases, but
917 * it should set it to invalid for some types (a conv.x on an object)
920 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
922 switch (ins->opcode) {
/* Numeric binops: result type from bin_num_table, opcode specialized via delta map. */
929 /* FIXME: check unverifiable args for STACK_MP */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor, ...). */
938 ins->type = bin_int_table [src1->type] [src2->type];
939 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
944 ins->type = shift_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
/* Two-operand compare: choose L/R/F/I variant from the operand stack type. */
950 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
951 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
952 ins->opcode = OP_LCOMPARE;
953 else if (src1->type == STACK_R4)
954 ins->opcode = OP_RCOMPARE;
955 else if (src1->type == STACK_R8)
956 ins->opcode = OP_FCOMPARE;
958 ins->opcode = OP_ICOMPARE;
960 case OP_ICOMPARE_IMM:
/* Immediate compare: src2 is a constant, so validate src1 against itself. */
961 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
962 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
963 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
975 ins->opcode += beqops_op_map [src1->type];
/* ceq: result is always an I4 boolean. */
978 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
979 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only comparison kinds with bit 0 set are valid here. */
985 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
/* neg. */
990 ins->type = neg_table [src1->type];
991 ins->opcode += unops_op_map [ins->type];
/* not: valid only on integral stack types. */
994 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
995 ins->type = src1->type;
997 ins->type = STACK_INV;
998 ins->opcode += unops_op_map [ins->type];
/* conv to small ints: result is I4. */
1004 ins->type = STACK_I4;
1005 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> R8. */
1008 ins->type = STACK_R8;
1009 switch (src1->type) {
1012 ins->opcode = OP_ICONV_TO_R_UN;
1015 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
1019 case CEE_CONV_OVF_I1:
1020 case CEE_CONV_OVF_U1:
1021 case CEE_CONV_OVF_I2:
1022 case CEE_CONV_OVF_U2:
1023 case CEE_CONV_OVF_I4:
1024 case CEE_CONV_OVF_U4:
1025 ins->type = STACK_I4;
1026 ins->opcode += ovf3ops_op_map [src1->type];
1028 case CEE_CONV_OVF_I_UN:
1029 case CEE_CONV_OVF_U_UN:
1030 ins->type = STACK_PTR;
1031 ins->opcode += ovf2ops_op_map [src1->type];
1033 case CEE_CONV_OVF_I1_UN:
1034 case CEE_CONV_OVF_I2_UN:
1035 case CEE_CONV_OVF_I4_UN:
1036 case CEE_CONV_OVF_U1_UN:
1037 case CEE_CONV_OVF_U2_UN:
1038 case CEE_CONV_OVF_U4_UN:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned result; opcode depends on source width. */
1043 ins->type = STACK_PTR;
1044 switch (src1->type) {
1046 ins->opcode = OP_ICONV_TO_U;
1050 #if SIZEOF_VOID_P == 8
1051 ins->opcode = OP_LCONV_TO_U;
1053 ins->opcode = OP_MOVE;
1057 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit. */
1066 ins->type = STACK_I8;
1067 ins->opcode += unops_op_map [src1->type];
1069 case CEE_CONV_OVF_I8:
1070 case CEE_CONV_OVF_U8:
1071 ins->type = STACK_I8;
1072 ins->opcode += ovf3ops_op_map [src1->type];
1074 case CEE_CONV_OVF_U8_UN:
1075 case CEE_CONV_OVF_I8_UN:
1076 ins->type = STACK_I8;
1077 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1080 ins->type = cfg->r4_stack_type;
1081 ins->opcode += unops_op_map [src1->type];
1084 ins->type = STACK_R8;
1085 ins->opcode += unops_op_map [src1->type];
1088 ins->type = STACK_R8;
/* conv to native int family. */
1092 ins->type = STACK_I4;
1093 ins->opcode += ovfops_op_map [src1->type];
1096 case CEE_CONV_OVF_I:
1097 case CEE_CONV_OVF_U:
1098 ins->type = STACK_PTR;
1099 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 results allowed. */
1102 case CEE_ADD_OVF_UN:
1104 case CEE_MUL_OVF_UN:
1106 case CEE_SUB_OVF_UN:
1107 ins->type = bin_num_table [src1->type] [src2->type];
1108 ins->opcode += ovfops_op_map [src1->type];
1109 if (ins->type == STACK_R8)
1110 ins->type = STACK_INV;
/* Memory loads: stack type follows the loaded width/kind. */
1112 case OP_LOAD_MEMBASE:
1113 ins->type = STACK_PTR;
1115 case OP_LOADI1_MEMBASE:
1116 case OP_LOADU1_MEMBASE:
1117 case OP_LOADI2_MEMBASE:
1118 case OP_LOADU2_MEMBASE:
1119 case OP_LOADI4_MEMBASE:
1120 case OP_LOADU4_MEMBASE:
1121 ins->type = STACK_PTR;
1123 case OP_LOADI8_MEMBASE:
1124 ins->type = STACK_I8;
1126 case OP_LOADR4_MEMBASE:
1127 ins->type = cfg->r4_stack_type;
1129 case OP_LOADR8_MEMBASE:
1130 ins->type = STACK_R8;
1133 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object_class when no better klass is known. */
1137 if (ins->type == STACK_MP)
1138 ins->klass = mono_defaults.object_class;
/* Eval-stack type produced by each ldind.* variant (indexed by opcode). */
1143 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* (Unused) compatibility matrix between stack types and parameter types. */
1149 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Verify that the eval-stack values in ARGS are compatible with SIG's
 *   parameter types (byref-ness, reference kinds, float kinds).
 *   NOTE(review): the success/failure return values are elided in this
 *   view — confirm the exact contract upstream.
 */
1154 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1159 switch (args->type) {
1169 for (i = 0; i < sig->param_count; ++i) {
1170 switch (args [i].type) {
/* Managed pointers are only valid for byref parameters, and vice versa. */
1174 if (!sig->params [i]->byref)
1178 if (sig->params [i]->byref)
1180 switch (sig->params [i]->type) {
1181 case MONO_TYPE_CLASS:
1182 case MONO_TYPE_STRING:
1183 case MONO_TYPE_OBJECT:
1184 case MONO_TYPE_SZARRAY:
1185 case MONO_TYPE_ARRAY:
/* Floating-point stack values must map to R4/R8 parameters. */
1192 if (sig->params [i]->byref)
1194 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1203 /*if (!param_table [args [i].type] [sig->params [i]->type])
1211 * When we need a pointer to the current domain many times in a method, we
1212 * call mono_domain_get() once and we store the result in a local variable.
1213 * This function returns the variable that represents the MonoDomain*.
1215 inline static MonoInst *
1216 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable on first use. */
1218 if (!cfg->domainvar)
1219 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 return cfg->domainvar;
1224 * The got_var contains the address of the Global Offset Table when AOT
/* Returns NULL unless this backend needs an explicit GOT variable under AOT. */
1228 mono_get_got_var (MonoCompile *cfg)
1230 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1232 if (!cfg->got_var) {
1233 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1235 return cfg->got_var;
/* Lazily create the rgctx/vtable variable; only meaningful under generic sharing. */
1239 mono_get_vtable_var (MonoCompile *cfg)
1241 g_assert (cfg->gshared);
1243 if (!cfg->rgctx_var) {
1244 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1245 /* force the var to be stack allocated */
1246 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1249 return cfg->rgctx_var;
/* Map an eval-stack entry back to a representative MonoType (for spilling etc.). */
1253 type_from_stack_type (MonoInst *ins) {
1254 switch (ins->type) {
1255 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1256 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1257 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1258 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1259 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP: managed pointer to the instruction's klass. */
1261 return &ins->klass->this_arg;
1262 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1263 case STACK_VTYPE: return &ins->klass->byval_arg;
1265 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: MonoType -> eval-stack type constant. */
1270 static G_GNUC_UNUSED int
1271 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1273 t = mono_type_get_underlying_type (t);
1285 case MONO_TYPE_FNPTR:
1287 case MONO_TYPE_CLASS:
1288 case MONO_TYPE_STRING:
1289 case MONO_TYPE_OBJECT:
1290 case MONO_TYPE_SZARRAY:
1291 case MONO_TYPE_ARRAY:
1297 return cfg->r4_stack_type;
1300 case MONO_TYPE_VALUETYPE:
1301 case MONO_TYPE_TYPEDBYREF:
1303 case MONO_TYPE_GENERICINST:
1304 if (mono_type_generic_inst_is_valuetype (t))
1310 g_assert_not_reached ();
/* Map a ldelem.*/stelem.* CIL opcode to the element class it accesses. */
1317 array_access_to_klass (int opcode)
1321 return mono_defaults.byte_class;
1323 return mono_defaults.uint16_class;
1326 return mono_defaults.int_class;
1329 return mono_defaults.sbyte_class;
1332 return mono_defaults.int16_class;
1335 return mono_defaults.int32_class;
1337 return mono_defaults.uint32_class;
1340 return mono_defaults.int64_class;
1343 return mono_defaults.single_class;
1346 return mono_defaults.double_class;
1347 case CEE_LDELEM_REF:
1348 case CEE_STELEM_REF:
1349 return mono_defaults.object_class;
1351 g_assert_not_reached ();
1357 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable to hold the stack value INS at stack slot SLOT
 *   across basic-block boundaries.  Variables are cached per (stack type,
 *   slot) in cfg->intvars so the same interface variable is reused; slots
 *   beyond the method's declared max_stack (possible due to inlining) always
 *   get a fresh variable.
 */
1360 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1365 /* inlining can result in deeper stacks */
1366 if (slot >= cfg->header->max_stack)
1367 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index into the (stack type x slot) cache */
1369 pos = ins->type - 1 + slot * STACK_MAX;
1371 switch (ins->type) {
1378 if ((vnum = cfg->intvars [pos]))
1379 return cfg->varinfo [vnum];
1380 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 cfg->intvars [pos] = res->inst_c0;
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Record the (image, token) pair for KEY in cfg->token_info_hash so the
 *   AOT compiler can later resolve KEY back to a metadata token.
 */
1390 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1393 * Don't use this if a generic_context is set, since that means AOT can't
1394 * look up the method using just the image+token.
1395 * table == 0 means this is a reference made from a wrapper.
1397 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1398 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1399 jump_info_token->image = image;
1400 jump_info_token->token = token;
1401 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1406 * This function is called to handle items that are left on the evaluation stack
1407 * at basic block boundaries. What happens is that we save the values to local variables
1408 * and we reload them later when first entering the target basic block (with the
1409 * handle_loaded_temps () function).
1410 * A single joint point will use the same variables (stored in the array bb->out_stack or
1411 * bb->in_stack, if the basic block is before or after the joint point).
1413 * This function needs to be called _before_ emitting the last instruction of
1414 * the bb (i.e. before emitting a branch).
1415 * If the stack merge fails at a join point, cfg->unverifiable is set.
1418 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1421 MonoBasicBlock *bb = cfg->cbb;
1422 MonoBasicBlock *outb;
1423 MonoInst *inst, **locals;
1428 if (cfg->verbose_level > 3)
1429 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables hold the out-stack. */
1430 if (!bb->out_scount) {
1431 bb->out_scount = count;
1432 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack already assigned to one of the successors. */
1434 for (i = 0; i < bb->out_count; ++i) {
1435 outb = bb->out_bb [i];
1436 /* exception handlers are linked, but they should not be considered for stack args */
1437 if (outb->flags & BB_EXCEPTION_HANDLER)
1439 //printf (" %d", outb->block_num);
1440 if (outb->in_stack) {
1442 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh interface variables. */
1448 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1449 for (i = 0; i < count; ++i) {
1451 * try to reuse temps already allocated for this purpose, if they occupy the same
1452 * stack slot and if they are of the same type.
1453 * This won't cause conflicts since if 'local' is used to
1454 * store one of the values in the in_stack of a bblock, then
1455 * the same variable will be used for the same outgoing stack
1457 * This doesn't work when inlining methods, since the bblocks
1458 * in the inlined methods do not inherit their in_stack from
1459 * the bblock they are inlined to. See bug #58863 for an
1462 if (cfg->inlined_method)
1463 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1465 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen out-stack to successors; mismatched counts make the method unverifiable. */
1470 for (i = 0; i < bb->out_count; ++i) {
1471 outb = bb->out_bb [i];
1472 /* exception handlers are linked, but they should not be considered for stack args */
1473 if (outb->flags & BB_EXCEPTION_HANDLER)
1475 if (outb->in_scount) {
1476 if (outb->in_scount != bb->out_scount) {
1477 cfg->unverifiable = TRUE;
1480 continue; /* check they are the same locals */
1482 outb->in_scount = count;
1483 outb->in_stack = bb->out_stack;
1486 locals = bb->out_stack;
/* Spill the live stack values into the shared variables. */
1488 for (i = 0; i < count; ++i) {
1489 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1490 inst->cil_code = sp [i]->cil_code;
1491 sp [i] = locals [i];
1492 if (cfg->verbose_level > 3)
1493 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1497 * It is possible that the out bblocks already have in_stack assigned, and
1498 * the in_stacks differ. In this case, we will store to all the different
1505 /* Find a bblock which has a different in_stack */
1507 while (bindex < bb->out_count) {
1508 outb = bb->out_bb [bindex];
1509 /* exception handlers are linked, but they should not be considered for stack args */
1510 if (outb->flags & BB_EXCEPTION_HANDLER) {
1514 if (outb->in_stack != locals) {
1515 for (i = 0; i < count; ++i) {
1516 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1517 inst->cil_code = sp [i]->cil_code;
1518 sp [i] = locals [i];
1519 if (cfg->verbose_level > 3)
1520 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1522 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 *   bitmap found at [BASE_REG + OFFSET] has the bit for KLASS's interface id
 *   set.  With COMPRESSED_INTERFACE_BITMAP the test is done by calling the
 *   mono_class_interface_match icall; otherwise the relevant bitmap byte is
 *   loaded and masked directly.  Under AOT the interface id is not known at
 *   compile time, so it is materialized via a MONO_PATCH_INFO_IID constant
 *   and the bit mask is computed at runtime.
 */
1532 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1534 int ibitmap_reg = alloc_preg (cfg);
1535 #ifdef COMPRESSED_INTERFACE_BITMAP
1537 MonoInst *res, *ins;
1538 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1539 MONO_ADD_INS (cfg->cbb, ins);
1541 if (cfg->compile_aot)
1542 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1544 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1545 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1548 int ibitmap_byte_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1552 if (cfg->compile_aot) {
1553 int iid_reg = alloc_preg (cfg);
1554 int shifted_iid_reg = alloc_preg (cfg);
1555 int ibitmap_byte_address_reg = alloc_preg (cfg);
1556 int masked_iid_reg = alloc_preg (cfg);
1557 int iid_one_bit_reg = alloc_preg (cfg);
1558 int iid_bit_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit within byte = iid & 7 */
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1564 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: interface_id is a compile-time constant, so fold the byte index and mask. */
1568 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1575 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1576 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoClass. */
1579 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1581 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1585 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1586 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoVTable. */
1589 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1591 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1595 * Emit code which checks whether the interface id of @klass is smaller
1596 * than the value given by max_iid_reg.
/*
 * On failure (iid > max_iid, unsigned compare): branch to FALSE_TARGET when
 * one is given, otherwise throw InvalidCastException.  Under AOT the iid is
 * loaded via a patch-info constant instead of being inlined as an immediate.
 */
1599 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1600 MonoBasicBlock *false_target)
1602 if (cfg->compile_aot) {
1603 int iid_reg = alloc_preg (cfg);
1604 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1615 /* Same as above, but obtains max_iid from a vtable */
1617 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1618 MonoBasicBlock *false_target)
1620 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1623 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1626 /* Same as above, but obtains max_iid from a klass */
1628 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1629 MonoBasicBlock *false_target)
1631 int max_iid_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1634 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype check: walk the supertypes table of the
 *   class in KLASS_REG and compare the entry at KLASS's depth against KLASS
 *   (given as a runtime MonoInst in KLASS_INS, an AOT class constant, or an
 *   immediate).  Branches to TRUE_TARGET on match; the idepth guard branches
 *   to FALSE_TARGET when the candidate's hierarchy is too shallow.  The
 *   idepth check is skipped when the depth fits in the statically-sized
 *   portion of the supertable.
 */
1638 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1640 int idepth_reg = alloc_preg (cfg);
1641 int stypes_reg = alloc_preg (cfg);
1642 int stype = alloc_preg (cfg);
1644 mono_class_setup_supertypes (klass);
1646 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to materialize the class to compare against: */
1654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1655 } else if (cfg->compile_aot) {
1656 int const_reg = alloc_preg (cfg);
1657 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype check without a runtime class instruction. */
1666 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in VTABLE_REG: first the
 *   max-iid range guard, then the interface-bitmap bit test.  On a zero bit,
 *   branches to TRUE_TARGET when given, otherwise throws
 *   InvalidCastException on inequality.
 */
1672 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int intf_reg = alloc_preg (cfg);
1676 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1677 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1682 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1686 * Variant of the above that takes a register to the class, not the vtable.
1689 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1691 int intf_bit_reg = alloc_preg (cfg);
1693 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1694 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* bit set -> interface implemented: take true_target if given, else throw on a clear bit */
1697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1699 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check: compare KLASS_REG against KLASS
 *   (runtime MonoInst, AOT class constant, or immediate) and throw
 *   InvalidCastException on inequality.
 */
1703 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1706 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1707 } else if (cfg->compile_aot) {
1708 int const_reg = alloc_preg (cfg);
1709 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper for mini_emit_class_check_inst without a runtime class instruction. */
1718 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1720 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare KLASS_REG against KLASS (AOT constant or immediate) and branch
 *   to TARGET with BRANCH_OP instead of throwing.
 */
1724 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1726 if (cfg->compile_aot) {
1727 int const_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: needed for the mutual recursion on nested array element types below. */
1737 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 *   InvalidCastException on failure.  The array branch checks rank, then
 *   recursively checks the element (cast_class), with special-casing around
 *   enums/System.Enum; for rank-1 SZARRAYs it additionally verifies the
 *   object has no bounds (is a vector), skipped when OBJ_REG is -1.  The
 *   non-array branch walks the supertypes table at KLASS's depth, guarding
 *   idepth when it exceeds the static supertable size.
 */
1740 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1743 int rank_reg = alloc_preg (cfg);
1744 int eclass_reg = alloc_preg (cfg);
1746 g_assert (!klass_inst);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1750 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1752 if (klass->cast_class == mono_defaults.object_class) {
1753 int parent_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1755 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1756 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1757 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1758 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1759 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1760 } else if (klass->cast_class == mono_defaults.enum_class) {
1761 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1762 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1763 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1765 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1766 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1769 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1770 /* Check that the object is a vector too */
1771 int bounds_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertypes-table walk (mirrors mini_emit_isninst_cast_inst, but throwing). */
1777 int idepth_reg = alloc_preg (cfg);
1778 int stypes_reg = alloc_preg (cfg);
1779 int stype = alloc_preg (cfg);
1781 mono_class_setup_supertypes (klass);
1783 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1786 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1790 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper for mini_emit_castclass_inst without a runtime class instruction. */
1795 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1797 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR to set SIZE bytes at [DESTREG + OFFSET] to VAL (currently
 *   only VAL == 0 is supported — asserted below).  Small aligned sizes use a
 *   single store-immediate; larger ones materialize VAL in a register and
 *   emit a sequence of register stores, widest-first, respecting ALIGN and
 *   the backend's unaligned-access capability.
 */
1801 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1805 g_assert (val == 0);
1810 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1816 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1821 #if SIZEOF_REGISTER == 8
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1829 val_reg = alloc_preg (cfg);
1831 if (SIZEOF_REGISTER == 8)
1832 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1834 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: store byte-by-byte until aligned. */
1837 /* This could be optimized further if necessary */
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1846 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR to copy SIZE bytes from [SRCREG + SOFFSET] to
 *   [DESTREG + DOFFSET].  Copies with the widest load/store pairs the
 *   alignment and backend allow (8-, 4-, 2-, then 1-byte chunks); an
 *   unaligned prefix is copied byte-by-byte.  SIZE is asserted < 10000 to
 *   bound code expansion.
 */
1877 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1884 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1885 g_assert (size < 10000);
1888 /* This could be optimized further if necessary */
1890 cur_reg = alloc_preg (cfg);
1891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1899 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1901 cur_reg = alloc_preg (cfg);
1902 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1911 cur_reg = alloc_preg (cfg);
1912 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing the value in SREG1 into the TLS slot TLS_KEY.  Under AOT
 *   the slot offset is resolved through a TLS-offset constant and
 *   OP_TLS_SET_REG; otherwise the offset is fetched now via
 *   mini_get_tls_offset and baked into OP_TLS_SET.
 */
1937 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1941 if (cfg->compile_aot) {
1942 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1943 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1945 ins->sreg2 = c->dreg;
1946 MONO_ADD_INS (cfg->cbb, ins);
1948 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1950 ins->inst_offset = mini_get_tls_offset (tls_key);
1951 MONO_ADD_INS (cfg->cbb, ins);
1958 * Emit IR to push the current LMF onto the LMF stack.
/*
 * Two strategies are visible below:
 *   1) When the LMF itself lives in TLS (lmf_ir_mono_lmf + TLS_KEY_LMF
 *      supported): save the current TLS LMF into lmf->previous_lmf and store
 *      the new LMF back to TLS.
 *   2) Otherwise: obtain lmf_addr (via a TLS/jit-tls intrinsic, an inlined
 *      pthread_getspecific call, or the mono_get_lmf_addr icall — the #ifdef
 *      structure selecting between these is partly elided in this fragment),
 *      cache it in cfg->lmf_addr_var, then link the LMF into the list.
 */
1961 emit_push_lmf (MonoCompile *cfg)
1964 * Emit IR to push the LMF:
1965 * lmf_addr = <lmf_addr from tls>
1966 * lmf->lmf_addr = lmf_addr
1967 * lmf->prev_lmf = *lmf_addr
1970 int lmf_reg, prev_lmf_reg;
1971 MonoInst *ins, *lmf_ins;
1976 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1977 /* Load current lmf */
1978 lmf_ins = mono_get_lmf_intrinsic (cfg);
1980 MONO_ADD_INS (cfg->cbb, lmf_ins);
1981 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1982 lmf_reg = ins->dreg;
1983 /* Save previous_lmf */
1984 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF as the current one in TLS */
1986 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1989 * Store lmf_addr in a variable, so it can be allocated to a global register.
1991 if (!cfg->lmf_addr_var)
1992 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1995 ins = mono_get_jit_tls_intrinsic (cfg);
1997 int jit_tls_dreg = ins->dreg;
1999 MONO_ADD_INS (cfg->cbb, ins);
2000 lmf_reg = alloc_preg (cfg);
2001 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2003 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2006 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2008 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 MonoInst *args [16], *jit_tls_ins, *ins;
2013 /* Inline mono_get_lmf_addr () */
2014 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2016 /* Load mono_jit_tls_id */
2017 if (cfg->compile_aot)
2018 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2020 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2021 /* call pthread_getspecific () */
2022 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2023 /* lmf_addr = &jit_tls->lmf */
2024 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2027 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2031 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2033 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2034 lmf_reg = ins->dreg;
2036 prev_lmf_reg = alloc_preg (cfg);
2037 /* Save previous_lmf */
2038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2039 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf: make this the current LMF */
2041 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2048 * Emit IR to pop the current LMF from the LMF stack.
/*
 * Mirror of emit_push_lmf: load lmf->previous_lmf and either write it back
 * to the TLS LMF slot (TLS-based path), or store it through lmf_addr.
 */
2051 emit_pop_lmf (MonoCompile *cfg)
2053 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2059 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2060 lmf_reg = ins->dreg;
2062 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2063 /* Load previous_lmf */
2064 prev_lmf_reg = alloc_preg (cfg);
2065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Restore it as the current LMF in TLS */
2067 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2070 * Emit IR to pop the LMF:
2071 * *(lmf->lmf_addr) = lmf->prev_lmf
2073 /* This could be called before emit_push_lmf () */
2074 if (!cfg->lmf_addr_var)
2075 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2076 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2078 prev_lmf_reg = alloc_preg (cfg);
2079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave icall to FUNC with the current method as the
 *   single argument, but only when ENTER_LEAVE profiling is enabled and we
 *   are compiling the outermost method (inlined methods are skipped so
 *   profiling results are not distorted).
 */
2085 emit_instrumentation_call (MonoCompile *cfg, void *func)
2087 MonoInst *iargs [1];
2090 * Avoid instrumenting inlined methods since it can
2091 * distort profiling results.
2093 if (cfg->method != cfg->current_method)
2096 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2097 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2098 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the call IR opcode family (VOIDCALL/CALL/LCALL/RCALL/FCALL/VCALL)
 *   for return type TYPE, picking the _REG variant when CALLI is set and the
 *   _MEMBASE variant when VIRT is set.  Enums are reduced to their base type
 *   and generic instances to their container class (the loop/goto driving
 *   that re-dispatch is elided in this fragment); unknown types abort.
 */
2103 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2106 type = mini_get_underlying_type (type);
2107 switch (type->type) {
2108 case MONO_TYPE_VOID:
2109 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2116 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2120 case MONO_TYPE_FNPTR:
2121 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2122 case MONO_TYPE_CLASS:
2123 case MONO_TYPE_STRING:
2124 case MONO_TYPE_OBJECT:
2125 case MONO_TYPE_SZARRAY:
2126 case MONO_TYPE_ARRAY:
2127 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2133 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2135 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2137 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2138 case MONO_TYPE_VALUETYPE:
2139 if (type->data.klass->enumtype) {
2140 type = mono_class_enum_basetype (type->data.klass);
2143 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2144 case MONO_TYPE_TYPEDBYREF:
2145 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2146 case MONO_TYPE_GENERICINST:
2147 type = &type->data.generic_class->container_class->byval_arg;
2150 case MONO_TYPE_MVAR:
/* gsharedvt: variable-typed returns use the vtype call opcodes */
2152 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2160 * target_type_is_incompatible:
2161 * @cfg: MonoCompile context
2163 * Check that the item @arg on the evaluation stack can be stored
2164 * in the target type (can be a local, or field, etc).
2165 * The cfg arg can be used to check if we need verification or just
2168 * Returns: non-0 value if arg can't be stored on a target.
2171 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2173 MonoType *simple_type;
/* byref targets accept managed pointers (with matching pointee class) or native int */
2176 if (target->byref) {
2177 /* FIXME: check that the pointed to types match */
2178 if (arg->type == STACK_MP)
2179 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2180 if (arg->type == STACK_PTR)
2185 simple_type = mini_get_underlying_type (target);
2186 switch (simple_type->type) {
2187 case MONO_TYPE_VOID:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2228 if (arg->type != cfg->r4_stack_type)
2232 if (arg->type != STACK_R8)
2235 case MONO_TYPE_VALUETYPE:
2236 if (arg->type != STACK_VTYPE)
2238 klass = mono_class_from_mono_type (simple_type);
2239 if (klass != arg->klass)
2242 case MONO_TYPE_TYPEDBYREF:
2243 if (arg->type != STACK_VTYPE)
2245 klass = mono_class_from_mono_type (simple_type);
2246 if (klass != arg->klass)
2249 case MONO_TYPE_GENERICINST:
2250 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 /* The second cases is needed when doing partial sharing */
2255 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2259 if (arg->type != STACK_OBJ)
2261 /* FIXME: check type compatibility */
/* MONO_TYPE_VAR falls in here too — case label elided in this fragment */
2265 case MONO_TYPE_MVAR:
2266 g_assert (cfg->gshared);
2267 if (mini_type_var_is_vt (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2271 if (arg->type != STACK_OBJ)
2276 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2282 * Prepare arguments for passing to a function call.
2283 * Return a non-zero value if the arguments can't be passed to the given
2285 * The type checks are not yet complete and some conversions may need
2286 * casts on 32 or 64 bit architectures.
2288 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *   Validate each stack argument in ARGS against SIG's parameter list
 *   (including an implicit `this` of STACK_OBJ/MP/PTR for instance
 *   signatures).  Enums and generic instances are reduced before checking.
 *   Return statements are elided at several points in this fragment.
 */
2291 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2293 MonoType *simple_type;
2297 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2301 for (i = 0; i < sig->param_count; ++i) {
2302 if (sig->params [i]->byref) {
2303 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2307 simple_type = mini_get_underlying_type (sig->params [i]);
2309 switch (simple_type->type) {
2310 case MONO_TYPE_VOID:
2319 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2325 case MONO_TYPE_FNPTR:
2326 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2329 case MONO_TYPE_CLASS:
2330 case MONO_TYPE_STRING:
2331 case MONO_TYPE_OBJECT:
2332 case MONO_TYPE_SZARRAY:
2333 case MONO_TYPE_ARRAY:
2334 if (args [i]->type != STACK_OBJ)
2339 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != cfg->r4_stack_type)
2347 if (args [i]->type != STACK_R8)
2350 case MONO_TYPE_VALUETYPE:
2351 if (simple_type->data.klass->enumtype) {
2352 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_TYPEDBYREF:
2359 if (args [i]->type != STACK_VTYPE)
2362 case MONO_TYPE_GENERICINST:
2363 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2366 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes */
2368 if (args [i]->type != STACK_VTYPE)
2372 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 *   The return statements for this variant are elided in this fragment.
 */
2380 callvirt_to_call (int opcode)
2383 case OP_CALL_MEMBASE:
2385 case OP_VOIDCALL_MEMBASE:
2387 case OP_FCALL_MEMBASE:
2389 case OP_RCALL_MEMBASE:
2391 case OP_VCALL_MEMBASE:
2393 case OP_LCALL_MEMBASE:
2396 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Map a *_MEMBASE (virtual) call opcode to its indirect (_REG) counterpart.
 */
2403 callvirt_to_call_reg (int opcode)
2406 case OP_CALL_MEMBASE:
2408 case OP_VOIDCALL_MEMBASE:
2409 return OP_VOIDCALL_REG;
2410 case OP_FCALL_MEMBASE:
2411 return OP_FCALL_REG;
2412 case OP_RCALL_MEMBASE:
2413 return OP_RCALL_REG;
2414 case OP_VCALL_MEMBASE:
2415 return OP_VCALL_REG;
2416 case OP_LCALL_MEMBASE:
2417 return OP_LCALL_REG;
2419 g_assert_not_reached ();
2425 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either the given IMT_ARG register value or
 *   a method constant — AOT patch-info constant under AOT, raw pointer
 *   otherwise) and attach it to CALL.  The LLVM path records the register in
 *   call->imt_arg_reg; both paths register it as an out-arg in
 *   MONO_ARCH_IMT_REG.  The #ifdef structure separating the two paths is
 *   partly elided in this fragment.
 */
2427 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2431 if (COMPILE_LLVM (cfg)) {
2432 method_reg = alloc_preg (cfg);
2435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2436 } else if (cfg->compile_aot) {
2437 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2440 MONO_INST_NEW (cfg, ins, OP_PCONST);
2441 ins->inst_p0 = method;
2442 ins->dreg = method_reg;
2443 MONO_ADD_INS (cfg->cbb, ins);
2447 call->imt_arg_reg = method_reg;
2449 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same constant materialization, register out-arg only */
2453 method_reg = alloc_preg (cfg);
2456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2457 } else if (cfg->compile_aot) {
2458 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2461 MONO_INST_NEW (cfg, ins, OP_PCONST);
2462 ins->inst_p0 = method;
2463 ins->dreg = method_reg;
2464 MONO_ADD_INS (cfg->cbb, ins);
2467 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP and fill in its target (the assignments
 *   of ip/type are elided in this fragment).
 */
2470 static MonoJumpInfo *
2471 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2473 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2477 ji->data.target = target;
/* Wrapper around mono_class_check_context_used; the cfg-dependent early-out is elided here. */
2483 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2486 return mono_class_check_context_used (klass);
/* Wrapper around mono_method_check_context_used; the cfg-dependent early-out is elided here. */
2492 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2495 return mono_method_check_context_used (method);
2501 * check_method_sharing:
2503 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs via OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may be NULL):
 *   - pass_vtable: static or valuetype methods on generic classes whose
 *     method is sharable and has no method-level generic context.
 *   - pass_mrgctx: methods with a method-level generic context that are
 *     sharable, or gsharedvt signatures.
 */
2506 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2508 gboolean pass_vtable = FALSE;
2509 gboolean pass_mrgctx = FALSE;
2511 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2512 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2513 gboolean sharable = FALSE;
2515 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2519 * Pass vtable iff target method might
2520 * be shared, which means that sharing
2521 * is enabled for its class and its
2522 * context is sharable (and it's not a
2525 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2529 if (mini_method_get_context (cmethod) &&
2530 mini_method_get_context (cmethod)->method_inst) {
2531 g_assert (!pass_vtable);
2533 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2536 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2541 if (out_pass_vtable)
2542 *out_pass_vtable = pass_vtable;
2543 if (out_pass_mrgctx)
2544 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS and let the backend emit the
 * out-argument setup. CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the
 * call flavor. Handles vtype returns (via OP_OUTARG_VTRETADDR), soft-float
 * r4 argument conversion, and records stack usage in the cfg. The call
 * instruction is NOT added to a bblock here; callers do that.
 */
2547 inline static MonoCallInst *
2548 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2549 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2553 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Profiler leave event must fire before the frame disappears on tail calls */
2561 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2565 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2568 call->signature = sig;
2569 call->rgctx_reg = rgctx;
2570 sig_ret = mini_get_underlying_type (sig->ret);
2572 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Tail calls with a vtype return reuse the caller's vret address */
2575 if (mini_type_is_vtype (sig_ret)) {
2576 call->vret_var = cfg->vret_addr;
2577 //g_assert_not_reached ();
2579 } else if (mini_type_is_vtype (sig_ret)) {
2580 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2583 temp->backend.is_pinvoke = sig->pinvoke;
2586 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2587 * address of return value to increase optimization opportunities.
2588 * Before vtype decomposition, the dreg of the call ins itself represents the
2589 * fact the call modifies the return value. After decomposition, the call will
2590 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2591 * will be transformed into an LDADDR.
2593 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2594 loada->dreg = alloc_preg (cfg);
2595 loada->inst_p0 = temp;
2596 /* We reference the call too since call->dreg could change during optimization */
2597 loada->inst_p1 = call;
2598 MONO_ADD_INS (cfg->cbb, loada);
2600 call->inst.dreg = temp->dreg;
2602 call->vret_var = loada;
2603 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2604 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2606 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2607 if (COMPILE_SOFT_FLOAT (cfg)) {
2609 * If the call has a float argument, we would need to do an r8->r4 conversion using
2610 * an icall, but that cannot be done during the call sequence since it would clobber
2611 * the call registers + the stack. So we do it before emitting the call.
2613 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2615 MonoInst *in = call->args [i];
2617 if (i >= sig->hasthis)
2618 t = sig->params [i - sig->hasthis];
2620 t = &mono_defaults.int_class->byval_arg;
2621 t = mono_type_get_underlying_type (t);
2623 if (!t->byref && t->type == MONO_TYPE_R4) {
2624 MonoInst *iargs [1];
2628 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2630 /* The result will be in an int vreg */
2631 call->args [i] = conv;
2637 call->need_unbox_trampoline = unbox_trampoline;
/* Delegate out-arg emission to the LLVM or native backend */
2640 if (COMPILE_LLVM (cfg))
2641 mono_llvm_emit_call (cfg, call);
2643 mono_arch_emit_call (cfg, call);
2645 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area needed by any call in this method */
2648 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2649 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument in RGCTX_REG to CALL: pin it to
 * MONO_ARCH_RGCTX_REG and mark the cfg/call as using an rgctx. */
2655 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2657 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2658 cfg->uses_rgctx_reg = TRUE;
2659 call->rgctx_reg = TRUE;
2661 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG are forwarded when non-NULL. For pinvoke wrappers
 * with calling-convention checking enabled, the SP is saved before and
 * compared after the call, raising ExecutionEngineException on imbalance.
 */
2665 inline static MonoInst*
2666 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2671 gboolean check_sp = FALSE;
2673 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2674 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2676 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value out early; it is attached to the call at the end */
2681 rgctx_reg = mono_alloc_preg (cfg);
2682 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2686 if (!cfg->stack_inbalance_var)
2687 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the post-call imbalance check */
2689 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2690 ins->dreg = cfg->stack_inbalance_var->dreg;
2691 MONO_ADD_INS (cfg->cbb, ins);
2694 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2696 call->inst.sreg1 = addr->dreg;
2699 emit_imt_argument (cfg, call, NULL, imt_arg);
2701 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2706 sp_reg = mono_alloc_preg (cfg);
2708 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2710 MONO_ADD_INS (cfg->cbb, ins);
2712 /* Restore the stack so we don't crash when throwing the exception */
2713 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2714 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2717 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
2728 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2731 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2733 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual/tail/remoting/llvm-only) call to METHOD with
 * signature SIG, arguments ARGS and receiver THIS_INS. Handles delegate
 * Invoke fast paths, devirtualization of final methods, IMT/interface
 * dispatch and rgctx argument passing. Returns the call instruction.
 */
2736 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2737 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2739 #ifndef DISABLE_REMOTING
2740 gboolean might_be_remote = FALSE;
2742 gboolean virtual = this_ins != NULL;
2743 gboolean enable_for_aot = TRUE;
2746 MonoInst *call_target = NULL;
2748 gboolean need_unbox_trampoline;
2751 sig = mono_method_signature (method);
/* llvm-only: interface calls are resolved through an icall to a computed target */
2753 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2754 MonoInst *icall_args [16];
2757 // FIXME: Optimize this
2759 guint32 imt_slot = mono_method_get_imt_slot (method);
2761 icall_args [0] = this_ins;
2762 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2764 icall_args [2] = imt_arg;
2766 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2767 icall_args [2] = ins;
2769 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2771 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2775 rgctx_reg = mono_alloc_preg (cfg);
2776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2779 if (method->string_ctor) {
2780 /* Create the real signature */
2781 /* FIXME: Cache these */
2782 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2783 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2788 context_used = mini_method_check_context_used (cfg, method);
2790 #ifndef DISABLE_REMOTING
2791 might_be_remote = this_ins && sig->hasthis &&
2792 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2793 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2795 if (might_be_remote && context_used) {
2798 g_assert (cfg->gshared);
2800 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2802 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* llvm-only: resolve virtual calls through an icall as well */
2806 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2807 // FIXME: Vcall optimizations below
2808 MonoInst *icall_args [16];
2811 if (sig->generic_param_count) {
2813 * Generic virtual call, pass the concrete method as the imt argument.
2815 imt_arg = emit_get_rgctx_method (cfg, context_used,
2816 method, MONO_RGCTX_INFO_METHOD);
2819 // FIXME: Optimize this
2821 int slot = mono_method_get_vtable_index (method);
2823 icall_args [0] = this_ins;
2824 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2826 icall_args [2] = imt_arg;
2828 EMIT_NEW_PCONST (cfg, ins, NULL);
2829 icall_args [2] = ins;
2831 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2834 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2836 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2838 #ifndef DISABLE_REMOTING
2839 if (might_be_remote)
2840 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2843 call->method = method;
2844 call->inst.flags |= MONO_INST_HAS_METHOD;
2845 call->inst.inst_left = this_ins;
2846 call->tail_call = tail;
2849 int vtable_reg, slot_reg, this_reg;
2852 this_reg = this_ins->dreg;
2854 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2855 MonoInst *dummy_use;
2857 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2859 /* Make a call to delegate->invoke_impl */
2860 call->inst.inst_basereg = this_reg;
2861 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2862 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2864 /* We must emit a dummy use here because the delegate trampoline will
2865 replace the 'this' argument with the delegate target making this activation
2866 no longer a root for the delegate.
2867 This is an issue for delegates that target collectible code such as dynamic
2868 methods of GC'able assemblies.
2870 For a test case look into #667921.
2872 FIXME: a dummy use is not the best way to do it as the local register allocator
2873 will put it on a caller save register and spil it around the call.
2874 Ideally, we would either put it on a callee save register or only do the store part.
2876 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2878 return (MonoInst*)call;
2881 if ((!cfg->compile_aot || enable_for_aot) &&
2882 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2883 (MONO_METHOD_IS_FINAL (method) &&
2884 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2885 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2887 * the method is not virtual, we just need to ensure this is not null
2888 * and then we can call the method directly.
2890 #ifndef DISABLE_REMOTING
2891 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2893 * The check above ensures method is not gshared, this is needed since
2894 * gshared methods can't have wrappers.
2896 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2900 if (!method->string_ctor)
2901 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2903 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2904 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2906 * the method is virtual, but we can statically dispatch since either
2907 * it's class or the method itself are sealed.
2908 * But first we need to ensure it's not a null reference.
2910 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2912 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2913 } else if (call_target) {
2914 vtable_reg = alloc_preg (cfg);
2915 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2917 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2918 call->inst.sreg1 = call_target->dreg;
/* Clear only the HAS_METHOD bit: '&= !FLAG' evaluated to '&= 0' and wiped
 * every instruction flag; bitwise complement is the intended operation. */
2919 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2921 vtable_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2923 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* IMT slots live at negative offsets before the vtable */
2924 guint32 imt_slot = mono_method_get_imt_slot (method);
2925 emit_imt_argument (cfg, call, call->method, imt_arg);
2926 slot_reg = vtable_reg;
2927 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->is_virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no imt/rgctx arguments. */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/* Emit a direct call to the native function FUNC with signature SIG.
 * The call is added to the current bblock and returned. */
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
/* Emit a call to the JIT icall identified by FUNC, going through its
 * registered wrapper so exceptions/stack walks work. */
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register ji so the ABS patch resolver recognizes it as a patch handle */
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Whether icalls may be invoked directly, skipping the managed wrapper.
 * Disabled under LLVM (address-width limits), when emitting sdb sequence
 * points, or when direct icalls are explicitly disabled. */
3009 direct_icalls_enabled (MonoCompile *cfg)
3011 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3013 if (cfg->compile_llvm)
3016 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, the (lazily created) wrapper method is
 * inlined instead of called, avoiding wrapper overhead.
 */
3022 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3025 * Call the jit icall without a wrapper if possible.
3026 * The wrapper is needed for the following reasons:
3027 * - to handle exceptions thrown using mono_raise_exceptions () from the
3028 * icall function. The EH code needs the lmf frame pushed by the
3029 * wrapper to be able to unwind back to managed code.
3030 * - to be able to do stack walks for asynchronously suspended
3031 * threads when debugging.
3033 if (info->no_raise && direct_icalls_enabled (cfg)) {
3037 if (!info->wrapper_method) {
3038 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3039 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it */
3041 mono_memory_barrier ();
3045 * Inline the wrapper method, which is basically a call to the C icall, and
3046 * an exception check.
3048 costs = inline_method (cfg, info->wrapper_method, NULL,
3049 args, NULL, cfg->real_offset, TRUE);
3050 g_assert (costs > 0);
3051 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3055 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend the result INS of a call with signature FSIG when the
 * callee (pinvoke or LLVM) may return sub-register-sized integers without
 * initialized upper bits. Returns the (possibly widened) result.
 */
3060 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3062 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3063 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3067 * Native code might return non register sized integers
3068 * without initializing the upper bits.
3070 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3071 case OP_LOADI1_MEMBASE:
3072 widen_op = OP_ICONV_TO_I1;
3074 case OP_LOADU1_MEMBASE:
3075 widen_op = OP_ICONV_TO_U1;
3077 case OP_LOADI2_MEMBASE:
3078 widen_op = OP_ICONV_TO_I2;
3080 case OP_LOADU2_MEMBASE:
3081 widen_op = OP_ICONV_TO_U2;
3087 if (widen_op != -1) {
3088 int dreg = alloc_preg (cfg);
3091 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the eval-stack type of the original result */
3092 widen->type = ins->type;
/* Return the cached corlib String.memcpy helper method; aborts if the
 * installed corlib is too old to provide it. */
3102 get_memcpy_method (void)
3104 static MonoMethod *memcpy_method = NULL;
3105 if (!memcpy_method) {
3106 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3108 g_error ("Old corlib found. Install a new one");
3110 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting at
 * OFFSET) that holds a managed reference and therefore needs a write
 * barrier. Recurses into embedded valuetype fields with references.
 */
3114 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3116 MonoClassField *field;
3117 gpointer iter = NULL;
3119 while ((field = mono_class_get_fields (klass, &iter))) {
3122 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it */
3124 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3125 if (mini_type_is_reference (mono_field_get_type (field))) {
3126 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3127 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3129 MonoClass *field_class = mono_class_from_mono_type (field->type);
3130 if (field_class->has_references)
3131 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 * a backend card-table barrier opcode, inline card marking, and finally a
 * call to the generic managed write barrier. No-op when write barriers are
 * disabled for this cfg.
 */
3137 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3139 int card_table_shift_bits;
3140 gpointer card_table_mask;
3142 MonoInst *dummy_use;
3143 int nursery_shift_bits;
3144 size_t nursery_size;
3146 if (!cfg->gen_write_barriers)
3149 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3151 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3153 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3156 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3157 wbarrier->sreg1 = ptr->dreg;
3158 wbarrier->sreg2 = value->dreg;
3159 MONO_ADD_INS (cfg->cbb, wbarrier);
3160 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3161 int offset_reg = alloc_preg (cfg);
3162 int card_reg = alloc_preg (cfg);
/* Inline card marking: card index = ptr >> shift (masked), then set byte */
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3166 if (card_table_mask)
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3169 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3170 * IMM's larger than 32bits.
3172 if (cfg->compile_aot) {
3173 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3175 MONO_INST_NEW (cfg, ins, OP_PCONST);
3176 ins->inst_p0 = card_table;
3177 ins->dreg = card_reg;
3178 MONO_ADD_INS (cfg->cbb, ins);
3181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic managed write barrier */
3184 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3185 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator */
3188 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS of SIZE
 * bytes from iargs[1] to iargs[0]. Falls back (returns early) for small
 * alignment, sizes above 32 pointers, or more than 5 unrolled stores — the
 * latter calls the mono_gc_wbarrier_value_copy_bitmap icall instead.
 */
3192 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3194 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3195 unsigned need_wb = 0;
3200 /*types with references can't have alignment smaller than sizeof(void*) */
3201 if (align < SIZEOF_VOID_P)
3204 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3205 if (size > 32 * SIZEOF_VOID_P)
3208 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3210 /* We don't unroll more than 5 stores to avoid code bloat. */
3211 if (size > 5 * SIZEOF_VOID_P) {
3212 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3213 size += (SIZEOF_VOID_P - 1);
3214 size &= ~(SIZEOF_VOID_P - 1);
3216 EMIT_NEW_ICONST (cfg, iargs [2], size);
3217 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3218 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3222 destreg = iargs [0]->dreg;
3223 srcreg = iargs [1]->dreg;
3226 dest_ptr_reg = alloc_preg (cfg);
3227 tmp_reg = alloc_preg (cfg);
/* Walk a cursor over the destination so barriers see the slot address */
3230 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3232 while (size >= SIZEOF_VOID_P) {
3233 MonoInst *load_inst;
3234 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3235 load_inst->dreg = tmp_reg;
3236 load_inst->inst_basereg = srcreg;
3237 load_inst->inst_offset = offset;
3238 MONO_ADD_INS (cfg->cbb, load_inst);
3240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references */
3243 emit_write_barrier (cfg, iargs [0], load_inst);
3245 offset += SIZEOF_VOID_P;
3246 size -= SIZEOF_VOID_P;
3249 /*tmp += sizeof (void*)*/
3250 if (size >= SIZEOF_VOID_P) {
3251 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3252 MONO_ADD_INS (cfg->cbb, iargs [0]);
3256 /* Those cannot be references since size < sizeof (void*) */
3258 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3282 * Emit code to copy a valuetype of type @klass whose address is stored in
3283 * @src->dreg to memory whose address is stored at @dest->dreg.
3286 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3288 MonoInst *iargs [4];
3291 MonoMethod *memcpy_method;
3292 MonoInst *size_ins = NULL;
3293 MonoInst *memcpy_ins = NULL;
3297 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3300 * This check breaks with spilled vars... need to handle it during verification anyway.
3301 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the value size is only known at runtime, fetch it (and a
 * memcpy helper) from the rgctx */
3304 if (mini_is_gsharedvt_klass (klass)) {
3306 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3307 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3311 n = mono_class_native_size (klass, &align);
3313 n = mono_class_value_size (klass, &align);
3315 /* if native is true there should be no references in the struct */
3316 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3317 /* Avoid barriers when storing to the stack */
3318 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3319 (dest->opcode == OP_LDADDR))) {
3325 context_used = mini_class_check_context_used (cfg, klass);
3327 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3328 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3330 } else if (context_used) {
3331 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3333 if (cfg->compile_aot) {
3334 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3336 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3337 mono_class_compute_gc_descriptor (klass);
3342 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3344 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: plain memcpy, inlined when small enough */
3349 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3350 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3351 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3356 iargs [2] = size_ins;
3358 EMIT_NEW_ICONST (cfg, iargs [2], n);
3360 memcpy_method = get_memcpy_method ();
3362 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3364 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return the cached corlib String.memset helper method; aborts if the
 * installed corlib is too old to provide it. */
3369 get_memset_method (void)
3371 static MonoMethod *memset_method = NULL;
3372 if (!memset_method) {
3373 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3375 g_error ("Old corlib found. Install a new one");
3377 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype of KLASS at address DEST.
 * Uses a gsharedvt bzero helper when the size is runtime-dependent, an
 * inline memset for small sizes, and the corlib memset helper otherwise.
 */
3381 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3383 MonoInst *iargs [3];
3386 MonoMethod *memset_method;
3387 MonoInst *size_ins = NULL;
3388 MonoInst *bzero_ins = NULL;
3389 static MonoMethod *bzero_method;
3391 /* FIXME: Optimize this for the case when dest is an LDADDR */
3392 mono_class_init (klass);
3393 if (mini_is_gsharedvt_klass (klass)) {
/* Size known only at runtime: fetch size + bzero helper from the rgctx */
3394 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3395 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3397 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3398 g_assert (bzero_method);
3400 iargs [1] = size_ins;
3401 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3405 n = mono_class_value_size (klass, &align);
/* Small enough to memset inline */
3407 if (n <= sizeof (gpointer) * 8) {
3408 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3411 memset_method = get_memset_method ();
3413 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3414 EMIT_NEW_ICONST (cfg, iargs [2], n);
3415 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3422 * Emit IR to return either the this pointer for instance method,
3423 * or the mrgctx for static methods.
3426 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3428 MonoInst *this_ins = NULL;
3430 g_assert (cfg->gshared);
/* Instance methods of reference types: the rgctx comes from 'this' */
3432 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3433 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3434 !method->klass->valuetype)
3435 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3437 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3438 MonoInst *mrgctx_loc, *mrgctx_var;
3440 g_assert (!this_ins);
3441 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method-level context: load the mrgctx from its dedicated variable */
3443 mrgctx_loc = mono_get_vtable_var (cfg);
3444 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3447 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3448 MonoInst *vtable_loc, *vtable_var;
3450 g_assert (!this_ins);
3452 vtable_loc = mono_get_vtable_var (cfg);
3453 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3455 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* Variable holds an mrgctx; dereference to get the class vtable */
3456 MonoInst *mrgctx_var = vtable_var;
3459 vtable_reg = alloc_preg (cfg);
3460 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3461 vtable_var->type = STACK_PTR;
/* Reference-type instance method: vtable is reachable through 'this' */
3469 vtable_reg = alloc_preg (cfg);
3470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate (from MP) an rgctx-entry patch description: which METHOD's
 * rgctx/mrgctx to consult and what data of INFO_TYPE to fetch. */
3475 static MonoJumpInfoRgctxEntry *
3476 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3478 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3479 res->method = method;
3480 res->in_mrgctx = in_mrgctx;
3481 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3482 res->data->type = patch_type;
3483 res->data->data.target = patch_data;
3484 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fetch of rgctx ENTRY from RGCTX: walk the rgctx array
 * chain for the statically-known slot, falling back to a mono_fill_*_rgctx
 * icall when any link or the slot itself is still NULL (lazy init).
 */
3489 static inline MonoInst*
3490 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3492 MonoInst *args [16];
3495 // FIXME: No fastpath since the slot is not a compile time constant
3497 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3498 if (entry->in_mrgctx)
3499 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3501 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3505 * FIXME: This can be called during decompose, which is a problem since it creates
3507 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3509 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3511 MonoBasicBlock *is_null_bb, *end_bb;
3512 MonoInst *res, *ins, *call;
3515 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into (mrgctx?, array depth, index within the array) */
3517 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3518 index = MONO_RGCTX_SLOT_INDEX (slot);
3520 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3521 for (depth = 0; ; ++depth) {
3522 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3524 if (index < size - 1)
3529 NEW_BBLOCK (cfg, end_bb);
3530 NEW_BBLOCK (cfg, is_null_bb);
3533 rgctx_reg = rgctx->dreg;
3535 rgctx_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3538 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3539 NEW_BBLOCK (cfg, is_null_bb);
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Follow the chain of rgctx arrays down to the one holding our slot */
3545 for (i = 0; i < depth; ++i) {
3546 int array_reg = alloc_preg (cfg);
3548 /* load ptr to next array */
3549 if (mrgctx && i == 0)
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3553 rgctx_reg = array_reg;
3554 /* is the ptr null? */
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3556 /* if yes, jump to actual trampoline */
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3561 val_reg = alloc_preg (cfg);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3563 /* is the slot null? */
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3565 /* if yes, jump to actual trampoline */
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath: slot already filled, just move it to the result reg */
3569 res_reg = alloc_preg (cfg);
3570 MONO_INST_NEW (cfg, ins, OP_MOVE);
3571 ins->dreg = res_reg;
3572 ins->sreg1 = val_reg;
3573 MONO_ADD_INS (cfg->cbb, ins);
3575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: lazily fill the slot via an icall */
3578 MONO_START_BB (cfg, is_null_bb);
3580 EMIT_NEW_ICONST (cfg, args [1], index);
3582 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3584 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3585 MONO_INST_NEW (cfg, ins, OP_MOVE);
3586 ins->dreg = res_reg;
3587 ins->sreg1 = call->dreg;
3588 MONO_ADD_INS (cfg->cbb, ins);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3591 MONO_START_BB (cfg, end_bb);
3600 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3603 static inline MonoInst*
3604 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3607 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* Otherwise go through the lazy-fetch trampoline */
3609 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR fetching RGCTX_TYPE data for KLASS from the current method's
 * rgctx (or mrgctx, depending on CONTEXT_USED). */
3613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3614 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3616 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3617 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3619 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR fetching RGCTX_TYPE data for signature SIG from the current
 * method's rgctx (or mrgctx, depending on CONTEXT_USED). */
3623 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3624 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3626 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3627 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3629 return emit_rgctx_fetch (cfg, rgctx, entry);
3633 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3634 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3636 MonoJumpInfoGSharedVtCall *call_info;
3637 MonoJumpInfoRgctxEntry *entry;
3640 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3641 call_info->sig = sig;
3642 call_info->method = cmethod;
3644 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3645 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3647 return emit_rgctx_fetch (cfg, rgctx, entry);
3651 * emit_get_rgctx_virt_method:
3653 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3656 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3657 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3659 MonoJumpInfoVirtMethod *info;
3660 MonoJumpInfoRgctxEntry *entry;
3663 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3664 info->klass = klass;
3665 info->method = virt_method;
3667 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3668 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3670 return emit_rgctx_fetch (cfg, rgctx, entry);
3674 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3675 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3677 MonoJumpInfoRgctxEntry *entry;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 * emit_get_rgctx_method:
3689 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3690 * normal constants, else emit a load from the rgctx.
3693 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3694 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3696 if (!context_used) {
3699 switch (rgctx_type) {
3700 case MONO_RGCTX_INFO_METHOD:
3701 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3703 case MONO_RGCTX_INFO_METHOD_RGCTX:
3704 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3707 g_assert_not_reached ();
3710 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3711 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
3718 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3719 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3721 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3722 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3724 return emit_rgctx_fetch (cfg, rgctx, entry);
3728 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3730 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3731 MonoRuntimeGenericContextInfoTemplate *template;
3736 for (i = 0; i < info->num_entries; ++i) {
3737 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3739 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3743 if (info->num_entries == info->count_entries) {
3744 MonoRuntimeGenericContextInfoTemplate *new_entries;
3745 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3747 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3749 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3750 info->entries = new_entries;
3751 info->count_entries = new_count_entries;
3754 idx = info->num_entries;
3755 template = &info->entries [idx];
3756 template->info_type = rgctx_type;
3757 template->data = data;
3759 info->num_entries ++;
3765 * emit_get_gsharedvt_info:
3767 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3770 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3775 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3776 /* Load info->entries [idx] */
3777 dreg = alloc_preg (cfg);
3778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3784 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3786 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3790 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor/vtable initialization of KLASS if it has not
 * run yet. In shared code the vtable is fetched from the rgctx, otherwise it is
 * a compile time constant.
 * NOTE(review): several structural lines (if/else headers, braces, an early
 * return on vtable load failure) are elided in this listing.
 */
3793 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3795 MonoInst *vtable_arg;
3798 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable through the rgctx. */
3801 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3802 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable is a constant. */
3804 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3808 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3811 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3815 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3816 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3818 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3819 ins->sreg1 = vtable_arg->dreg;
3820 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: open-code the "already initialized?" fast path, then call the icall. */
3822 static int byte_offset = -1;
3823 static guint8 bitmask;
3824 int bits_reg, inited_reg;
3825 MonoBasicBlock *inited_bb;
3826 MonoInst *args [16];
/* Locate the 'initialized' bitfield inside MonoVTable once per process. */
3828 if (byte_offset < 0)
3829 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3831 bits_reg = alloc_ireg (cfg);
3832 inited_reg = alloc_ireg (cfg);
/* inited_reg = vtable->initialized bit */
3834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3837 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the bit is already set. */
3839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3842 args [0] = vtable_arg;
3843 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3845 MONO_START_BB (cfg, inited_bb);
3850 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3854 if (cfg->gen_seq_points && cfg->method == method) {
3855 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3857 ins->flags |= MONO_INST_NONEMPTY_STACK;
3858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * destination classes of a cast of OBJ_REG to KLASS into the jit tls area,
 * so a failing cast can print a detailed message.
 * NOTE(review): the lines guarding on NULL_CHECK and the tls-intrinsic
 * failure branch (exit on unsupported platform) are partially elided here.
 */
3863 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3865 if (mini_get_debug_options ()->better_cast_details) {
3866 int vtable_reg = alloc_preg (cfg);
3867 int klass_reg = alloc_preg (cfg);
3868 MonoBasicBlock *is_null_bb = NULL;
3870 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for null objects. */
3873 NEW_BBLOCK (cfg, is_null_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3879 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No tls intrinsic on this backend: the feature cannot work. */
3881 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3885 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3891 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the target class must come from the rgctx. */
3893 MonoInst *class_ins;
3895 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3896 to_klass_reg = class_ins->dreg;
/* Non-shared code: the target class is a constant. */
3898 to_klass_reg = alloc_preg (cfg);
3899 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3904 MONO_START_BB (cfg, is_null_bb);
3909 reset_cast_details (MonoCompile *cfg)
3911 /* Reset the variables holding the cast details */
3912 if (mini_get_debug_options ()->better_cast_details) {
3913 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3915 MONO_ADD_INS (cfg->cbb, tls_get);
3916 /* It is enough to reset the from field */
3917 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3922 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is an instance
 * of exactly ARRAY_CLASS. Depending on AOT/shared mode the comparison is done
 * on the class pointer or directly on the vtable pointer.
 * NOTE(review): else-branch headers, local declarations and the early returns
 * after the mono_class_vtable failure checks are elided in this listing.
 */
3925 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3927 int vtable_reg = alloc_preg (cfg);
3930 context_used = mini_class_check_context_used (cfg, array_class);
3932 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3934 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code cannot compare vtables (one per domain); compare classes. */
3936 if (cfg->opt & MONO_OPT_SHARED) {
3937 int class_reg = alloc_preg (cfg);
3938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3939 if (cfg->compile_aot) {
3940 int klass_reg = alloc_preg (cfg);
3941 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
/* JIT: the class pointer can be embedded as an immediate. */
3944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable from the rgctx. */
3946 } else if (context_used) {
3947 MonoInst *vtable_ins;
3949 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3952 if (cfg->compile_aot) {
3956 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3958 vt_reg = alloc_preg (cfg);
3959 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3963 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch from the comparison above raises the exception. */
3969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3971 reset_cast_details (cfg);
3975 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3976 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by calling the runtime-provided
 * Nullable<T>.Unbox () method on VAL. If context_used is non zero, shared
 * generic code is generated.
 * NOTE(review): the if/else skeleton (context_used branch, pass_vtable
 * branch) is elided in this listing.
 */
3979 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3981 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared code: call Unbox indirectly through an rgctx-provided address. */
3984 MonoInst *rgctx, *addr;
3986 /* FIXME: What if the class is shared? We might not
3987 have to get the address of the method from the
3989 addr = emit_get_rgctx_method (cfg, context_used, method,
3990 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3992 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3994 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, optionally passing the vtable as extra arg. */
3996 gboolean pass_vtable, pass_mrgctx;
3997 MonoInst *rgctx_arg = NULL;
3999 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4000 g_assert (!pass_mrgctx);
4003 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4006 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4009 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which type-checks the boxed object on the stack top SP [0] against
 * KLASS and produces the address of its unboxed payload (obj + sizeof (MonoObject)).
 * NOTE(review): the context_used if/else skeleton and the final 'return add;'
 * are elided in this listing.
 */
4014 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4018 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4019 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4020 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4021 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4023 obj_reg = sp [0]->dreg;
/* Faulting load of the vtable doubles as the null check. */
4024 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4025 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4027 /* FIXME: generics */
4028 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type. */
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
4038 MonoInst *element_class;
4040 /* This assertion is from the unboxcast insn */
4041 g_assert (klass->rank == 0);
4043 element_class = emit_get_rgctx_klass (cfg, context_used,
4044 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4046 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4047 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: compare against the constant element class. */
4049 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4050 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4051 reset_cast_details (cfg);
/* Result: pointer past the object header, i.e. the vtype payload. */
4054 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4055 MONO_ADD_INS (cfg->cbb, add);
4056 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when the target type KLASS is only known at runtime (gsharedvt).
 * Branches at runtime on the CLASS_BOX_TYPE info: value type (in-place
 * address), reference type (spill the ref to a temp), or Nullable<T>
 * (call the rgctx-provided Unbox method). All paths leave an address in
 * addr_reg from which the value is finally loaded.
 * NOTE(review): several declarations, branch headers and the final
 * 'return ins;' are elided in this listing.
 */
4063 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4065 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4066 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4070 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type check first: castclass-style helper which also handles unboxing casts. */
4076 args [1] = klass_inst;
4079 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4081 NEW_BBLOCK (cfg, is_ref_bb);
4082 NEW_BBLOCK (cfg, is_nullable_bb);
4083 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: 1 == reference, 2 == nullable. */
4084 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4091 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4092 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fallthrough path: the payload lives right after the object header. */
4096 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4097 MONO_ADD_INS (cfg->cbb, addr);
4099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4102 MONO_START_BB (cfg, is_ref_bb);
4104 /* Save the ref to a temporary */
4105 dreg = alloc_ireg (cfg);
4106 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4107 addr->dreg = addr_reg;
4108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4112 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox through a hand-built gsharedvt signature. */
4115 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4116 MonoInst *unbox_call;
4117 MonoMethodSignature *unbox_sig;
4119 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4120 unbox_sig->ret = &klass->byval_arg;
4121 unbox_sig->param_count = 1;
4122 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4123 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4125 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4126 addr->dreg = addr_reg;
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4132 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value from the computed address. */
4135 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4141 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of type KLASS, choosing between
 * managed allocators, specialized helpers and generic icalls depending on
 * shared mode, AOT and rgctx availability.
 * Returns NULL and set the cfg exception on error.
 * NOTE(review): branch headers, some declarations and the early error
 * returns are elided in this listing.
 */
4144 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4146 MonoInst *iargs [2];
/* Shared (context_used) path: allocator args come from the rgctx. */
4152 MonoInst *iargs [2];
4153 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4155 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4157 if (cfg->opt & MONO_OPT_SHARED)
4158 rgctx_info = MONO_RGCTX_INFO_KLASS;
4160 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4161 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
/* Domain-shared code passes (domain, klass); otherwise (vtable). */
4163 if (cfg->opt & MONO_OPT_SHARED) {
4164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4166 alloc_ftn = mono_object_new;
4169 alloc_ftn = mono_object_new_specific;
4172 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4173 if (known_instance_size) {
4174 int size = mono_class_instance_size (klass);
4175 if (size < sizeof (MonoObject))
4176 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4178 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4180 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4183 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path below. */
4186 if (cfg->opt & MONO_OPT_SHARED) {
4187 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4190 alloc_ftn = mono_object_new;
4191 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4192 /* This happens often in argument checking code, eg. throw new FooException... */
4193 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4194 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4195 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4197 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4198 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg. */
4202 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4203 cfg->exception_ptr = klass;
4207 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4209 if (managed_alloc) {
4210 int size = mono_class_instance_size (klass);
4211 if (size < sizeof (MonoObject))
4212 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4214 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4215 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4216 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4218 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the size in words ('lw') as first argument. */
4220 guint32 lw = vtable->klass->instance_size;
4221 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4222 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4223 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4226 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4230 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4234 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of type KLASS: Nullable<T> goes through
 * Nullable<T>.Box, gsharedvt types branch at runtime on the box type, and
 * plain value types allocate an object and store the payload after the header.
 * Returns NULL and set the cfg exception on error.
 * NOTE(review): branch headers, declarations and several 'return' lines are
 * elided in this listing.
 */
4237 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4239 MonoInst *alloc, *ins;
4241 if (mono_class_is_nullable (klass)) {
4242 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
/* Shared code: call Box indirectly through an rgctx-provided address. */
4245 /* FIXME: What if the class is shared? We might not
4246 have to get the method address from the RGCTX. */
4247 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4248 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4249 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4251 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, optionally passing the vtable. */
4253 gboolean pass_vtable, pass_mrgctx;
4254 MonoInst *rgctx_arg = NULL;
4256 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4257 g_assert (!pass_mrgctx);
4260 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4263 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4266 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box type is only known at runtime; branch on it. */
4270 if (mini_is_gsharedvt_klass (klass)) {
4271 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4272 MonoInst *res, *is_ref, *src_var, *addr;
4275 dreg = alloc_ireg (cfg);
4277 NEW_BBLOCK (cfg, is_ref_bb);
4278 NEW_BBLOCK (cfg, is_nullable_bb);
4279 NEW_BBLOCK (cfg, end_bb);
/* 1 == reference type, 2 == nullable (same encoding as handle_unbox_gsharedvt). */
4280 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype fallthrough path: allocate and store the payload. */
4288 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4291 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4292 ins->opcode = OP_STOREV_MEMBASE;
4294 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4295 res->type = STACK_OBJ;
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4300 MONO_START_BB (cfg, is_ref_bb);
4302 /* val is a vtype, so has to load the value manually */
4303 src_var = get_vreg_to_inst (cfg, val->dreg);
4305 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4306 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4311 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Box through a hand-built signature. */
4314 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4315 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4317 MonoMethodSignature *box_sig;
4320 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4321 * construct that method at JIT time, so have to do things by hand.
4323 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4324 box_sig->ret = &mono_defaults.object_class->byval_arg;
4325 box_sig->param_count = 1;
4326 box_sig->params [0] = &klass->byval_arg;
4327 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4328 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4329 res->type = STACK_OBJ;
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4335 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate, then copy the value after the object header. */
4339 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4343 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4349 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4352 MonoGenericContainer *container;
4353 MonoGenericInst *ginst;
4355 if (klass->generic_class) {
4356 container = klass->generic_class->container_class->generic_container;
4357 ginst = klass->generic_class->context.class_inst;
4358 } else if (klass->generic_container && context_used) {
4359 container = klass->generic_container;
4360 ginst = container->context.class_inst;
4365 for (i = 0; i < container->type_argc; ++i) {
4367 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4369 type = ginst->type_argv [i];
4370 if (mini_type_is_reference (type))
/* Lazily-initialized set of corlib class names whose icalls may be called directly; see icall_is_direct_callable (). */
4376 static GHashTable* direct_icall_type_hash;
4379 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4381 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4382 if (!direct_icalls_enabled (cfg))
4386 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4387 * Whitelist a few icalls for now.
4389 if (!direct_icall_type_hash) {
4390 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4392 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4393 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4394 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4395 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4396 mono_memory_barrier ();
4397 direct_icall_type_hash = h;
4400 if (cmethod->klass == mono_defaults.math_class)
4402 /* No locking needed */
4403 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* A type check is "complex" (needs the cache-based helper rather than a plain vtable/class compare) for interfaces, arrays, nullables, MBR/sealed classes and generic type variables. */
4408 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4411 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4413 MonoMethod *mono_castclass;
4416 mono_castclass = mono_marshal_get_castclass_with_cache ();
4418 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4419 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4420 reset_cast_details (cfg);
4426 get_castclass_cache_idx (MonoCompile *cfg)
4428 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4429 cfg->castclass_cache_index ++;
4430 return (cfg->method_index << 16) | cfg->castclass_cache_index;
4434 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4443 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4446 if (cfg->compile_aot) {
4447 idx = get_castclass_cache_idx (cfg);
4448 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4450 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4453 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4454 return emit_castclass_with_cache (cfg, klass, args);
4458 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure. Picks between the cache-based wrapper, an
 * inlined marshal wrapper, and inline vtable/class compares.
 * Returns NULL and set the cfg exception on error.
 * NOTE(review): branch headers, declarations and several return statements
 * are elided in this listing.
 */
4461 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4463 MonoBasicBlock *is_null_bb;
4464 int obj_reg = src->dreg;
4465 int vtable_reg = alloc_preg (cfg);
4467 MonoInst *klass_inst = NULL, *res;
4469 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments need the cache-based helper even in non-shared code. */
4471 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4472 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4473 (*inline_costs) += 2;
/* MBR/interface casts: inline the castclass marshal wrapper. */
4475 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4476 MonoMethod *mono_castclass;
4477 MonoInst *iargs [1];
4480 mono_castclass = mono_marshal_get_castclass (klass);
4483 save_cast_details (cfg, klass, src->dreg, TRUE);
4484 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4485 iargs, ip, cfg->real_offset, TRUE);
4486 reset_cast_details (cfg);
4487 CHECK_CFG_EXCEPTION;
4488 g_assert (costs > 0);
4490 cfg->real_offset += 5;
4492 (*inline_costs) += costs;
/* Shared code: complex checks go through the cache helper with rgctx data. */
4500 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4501 MonoInst *cache_ins;
4503 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4508 /* klass - it's the second element of the cache entry*/
4509 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4512 args [2] = cache_ins;
4514 return emit_castclass_with_cache (cfg, klass, args);
4517 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline fast path: null objects always pass the cast. */
4520 NEW_BBLOCK (cfg, is_null_bb);
4522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4525 save_cast_details (cfg, klass, obj_reg, FALSE);
4527 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4529 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4531 int klass_reg = alloc_preg (cfg);
4533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes: a single equality compare suffices. */
4535 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4536 /* the remoting code is broken, access the class for now */
4537 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4538 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4541 cfg->exception_ptr = klass;
4544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4549 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4556 MONO_START_BB (cfg, is_null_bb);
4558 reset_cast_details (cfg);
4567 * Returns NULL and set the cfg exception on error.
4570 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4573 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4574 int obj_reg = src->dreg;
4575 int vtable_reg = alloc_preg (cfg);
4576 int res_reg = alloc_ireg_ref (cfg);
4577 MonoInst *klass_inst = NULL;
4582 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4583 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4584 MonoInst *cache_ins;
4586 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4591 /* klass - it's the second element of the cache entry*/
4592 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4595 args [2] = cache_ins;
4597 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4600 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4603 NEW_BBLOCK (cfg, is_null_bb);
4604 NEW_BBLOCK (cfg, false_bb);
4605 NEW_BBLOCK (cfg, end_bb);
4607 /* Do the assignment at the beginning, so the other assignment can be if converted */
4608 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4609 ins->type = STACK_OBJ;
4612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4613 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4617 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4618 g_assert (!context_used);
4619 /* the is_null_bb target simply copies the input register to the output */
4620 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4622 int klass_reg = alloc_preg (cfg);
4625 int rank_reg = alloc_preg (cfg);
4626 int eclass_reg = alloc_preg (cfg);
4628 g_assert (!context_used);
4629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4634 if (klass->cast_class == mono_defaults.object_class) {
4635 int parent_reg = alloc_preg (cfg);
4636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4637 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4638 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4640 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4641 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4642 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4644 } else if (klass->cast_class == mono_defaults.enum_class) {
4645 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4647 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4648 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4650 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4651 /* Check that the object is a vector too */
4652 int bounds_reg = alloc_preg (cfg);
4653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4658 /* the is_null_bb target simply copies the input register to the output */
4659 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4661 } else if (mono_class_is_nullable (klass)) {
4662 g_assert (!context_used);
4663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4667 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4668 g_assert (!context_used);
4669 /* the remoting code is broken, access the class for now */
4670 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4671 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4673 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4674 cfg->exception_ptr = klass;
4677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4686 /* the is_null_bb target simply copies the input register to the output */
4687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4692 MONO_START_BB (cfg, false_bb);
4694 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4697 MONO_START_BB (cfg, is_null_bb);
4699 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit IR implementing the internal CISINST opcode: a remoting-aware
 *   "is instance of KLASS" test on SRC that can also report "don't know"
 *   for transparent proxies whose type cannot be determined at JIT time.
 *   NOTE(review): this listing elides some lines (the #else/#endif arms of
 *   the DISABLE_REMOTING blocks and the final return are not visible here).
 */
4705 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4707 /* This opcode takes as input an object reference and a class, and returns:
4708 0) if the object is an instance of the class,
4709 1) if the object is not instance of the class,
4710 2) if the object is a proxy whose type cannot be determined */
4713 #ifndef DISABLE_REMOTING
4714 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4716 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4718 int obj_reg = src->dreg;
4719 int dreg = alloc_ireg (cfg);
4721 #ifndef DISABLE_REMOTING
4722 int klass_reg = alloc_preg (cfg);
4725 NEW_BBLOCK (cfg, true_bb);
4726 NEW_BBLOCK (cfg, false_bb);
4727 NEW_BBLOCK (cfg, end_bb);
4728 #ifndef DISABLE_REMOTING
4729 NEW_BBLOCK (cfg, false2_bb);
4730 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": branch straight to false_bb. */
4733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try the interface-cast fast path first. */
4736 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4737 #ifndef DISABLE_REMOTING
4738 NEW_BBLOCK (cfg, interface_fail_bb);
4741 tmp_reg = alloc_preg (cfg);
4742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4743 #ifndef DISABLE_REMOTING
4744 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface cast failed: the object may still be a transparent proxy. */
4745 MONO_START_BB (cfg, interface_fail_bb);
4746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4748 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info -> result 2 ("cannot determine"). */
4750 tmp_reg = alloc_preg (cfg);
4751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4755 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: inspect the vtable's class, handling proxies. */
4758 #ifndef DISABLE_REMOTING
4759 tmp_reg = alloc_preg (cfg);
4760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4763 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: test against the remote class instead. */
4764 tmp_reg = alloc_preg (cfg);
4765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4768 tmp_reg = alloc_preg (cfg);
4769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4773 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4776 MONO_START_BB (cfg, no_proxy_bb);
4778 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4780 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result encoding: false_bb -> 1 (not an instance). */
4784 MONO_START_BB (cfg, false_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb -> 2 (proxy whose type cannot be determined). */
4789 #ifndef DISABLE_REMOTING
4790 MONO_START_BB (cfg, false2_bb);
4792 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4793 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb -> 0 (is an instance). */
4796 MONO_START_BB (cfg, true_bb);
4798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4800 MONO_START_BB (cfg, end_bb);
/* The OP_ICONST below materializes the result as an int-typed MonoInst
   (its dreg wiring is on lines elided from this listing). */
4803 MONO_INST_NEW (cfg, ins, OP_ICONST);
4805 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR implementing the internal CCASTCLASS opcode: a remoting-aware
 *   castclass on SRC against KLASS that throws InvalidCastException on
 *   failure instead of returning a failure code.
 *   NOTE(review): this listing elides some lines (the #else/#endif arms of
 *   the DISABLE_REMOTING blocks and the final return are not visible here).
 */
4811 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4813 /* This opcode takes as input an object reference and a class, and returns:
4814 0) if the object is an instance of the class,
4815 1) if the object is a proxy whose type cannot be determined
4816 an InvalidCastException exception is thrown otherwise*/
4819 #ifndef DISABLE_REMOTING
4820 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4822 MonoBasicBlock *ok_result_bb;
4824 int obj_reg = src->dreg;
4825 int dreg = alloc_ireg (cfg);
4826 int tmp_reg = alloc_preg (cfg);
4828 #ifndef DISABLE_REMOTING
4829 int klass_reg = alloc_preg (cfg);
4830 NEW_BBLOCK (cfg, end_bb);
4833 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
4835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a descriptive exception. */
4838 save_cast_details (cfg, klass, obj_reg, FALSE);
/* Interface case: interface-cast fast path, then proxy fallback. */
4840 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4841 #ifndef DISABLE_REMOTING
4842 NEW_BBLOCK (cfg, interface_fail_bb);
4844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4845 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4846 MONO_START_BB (cfg, interface_fail_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy either -> this check throws. */
4849 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot satisfy the cast -> throw. */
4851 tmp_reg = alloc_preg (cfg);
4852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4854 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info -> result 1 ("cannot determine"). */
4856 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4860 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case: inspect the vtable's class, handling proxies. */
4864 #ifndef DISABLE_REMOTING
4865 NEW_BBLOCK (cfg, no_proxy_bb);
4867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4869 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: test against the remote class instead. */
4871 tmp_reg = alloc_preg (cfg);
4872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4873 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4875 tmp_reg = alloc_preg (cfg);
4876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4880 NEW_BBLOCK (cfg, fail_1_bb);
4882 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb -> 1 (proxy whose type cannot be determined). */
4884 MONO_START_BB (cfg, fail_1_bb);
4886 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: ordinary castclass (throws on failure). */
4889 MONO_START_BB (cfg, no_proxy_bb);
4891 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4893 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb -> 0 (cast succeeded). */
4897 MONO_START_BB (cfg, ok_result_bb);
4899 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4901 #ifndef DISABLE_REMOTING
4902 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an int-typed MonoInst (dreg wiring elided). */
4906 MONO_INST_NEW (cfg, ins, OP_ICONST);
4908 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *   Emit inline IR for Enum.HasFlag: load the enum value pointed to by
 *   ENUM_THIS, AND it with ENUM_FLAG, and compare the result back against
 *   ENUM_FLAG for equality, yielding a boolean (STACK_I4).
 *   NOTE(review): the switch cases that set `is_i4` are elided from this
 *   listing — presumably it selects 32- vs 64-bit ops based on the enum's
 *   underlying type; confirm against the full source.
 */
4913 static G_GNUC_UNUSED MonoInst*
4914 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4916 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4917 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4920 switch (enum_type->type) {
4923 #if SIZEOF_REGISTER == 8
4935 MonoInst *load, *and, *cmp, *ceq;
4936 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4937 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4938 int dest_reg = alloc_ireg (cfg);
/* (this & flag) == flag */
4940 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4941 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4942 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4943 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4945 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that need simpler forms. */
4948 load = mono_decompose_opcode (cfg, load);
4949 and = mono_decompose_opcode (cfg, and);
4950 cmp = mono_decompose_opcode (cfg, cmp);
4951 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *   Emit IR that constructs a delegate of type KLASS bound to METHOD with
 *   receiver TARGET, inlining the work of mono_delegate_ctor (): allocate
 *   the delegate object, fill in its target/method/trampoline fields, and
 *   (for llvm-only mode) fall back to a JIT icall instead.
 *   VIRTUAL selects the virtual-delegate trampoline path.
 *   NOTE(review): several lines (variable declarations, #else/#endif arms,
 *   the final return) are elided from this listing.
 */
4959 * Returns NULL and sets the cfg exception on error.
4961 static G_GNUC_UNUSED MonoInst*
4962 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4966 gpointer trampoline;
4967 MonoInst *obj, *method_ins, *tramp_ins;
4971 if (virtual && !cfg->llvm_only) {
4972 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4975 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate object itself. */
4979 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4983 if (cfg->llvm_only) {
4984 MonoInst *args [16];
4987 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4988 * the address of a gshared method. So use a JIT icall.
4989 * FIXME: Optimize this.
4993 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4994 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4999 /* Inline the contents of mono_delegate_ctor */
5001 /* Set target field */
5002 /* Optimize away setting of NULL target */
5003 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* GC write barrier for the reference store into the heap object. */
5005 if (cfg->gen_write_barriers) {
5006 dreg = alloc_preg (cfg);
5007 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5008 emit_write_barrier (cfg, ptr, target);
5012 /* Set method field */
5013 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5014 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5017 * To avoid looking up the compiled code belonging to the target method
5018 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5019 * store it, and we fill it after the method has been compiled.
5021 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5022 MonoInst *code_slot_ins;
5025 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain code slot under the domain lock. */
5027 domain = mono_domain_get ();
5028 mono_domain_lock (domain);
5029 if (!domain_jit_info (domain)->method_code_hash)
5030 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5031 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5033 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5034 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5036 mono_domain_unlock (domain);
/* AOT cannot embed the slot address directly; use a patch instead. */
5038 if (cfg->compile_aot)
5039 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5041 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT patch or a runtime-created one. */
5046 if (cfg->compile_aot) {
5047 MonoDelegateClassMethodPair *del_tramp;
5049 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5050 del_tramp->klass = klass;
5051 del_tramp->method = context_used ? NULL : method;
5052 del_tramp->is_virtual = virtual;
5053 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5056 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5058 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5059 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5062 /* Set invoke_impl field */
5064 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT path: read invoke_impl/method_ptr out of the tramp info struct. */
5066 dreg = alloc_preg (cfg);
5067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5070 dreg = alloc_preg (cfg);
5071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5072 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
5075 dreg = alloc_preg (cfg);
5076 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5079 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a call to the mono_array_new_va () icall to allocate a
 *   multi-dimensional array of the given RANK, with the dimension
 *   arguments in SP. Uses a vararg calling convention, which also
 *   disables LLVM for this method.
 */
5085 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5087 MonoJitICallInfo *info;
5089 /* Need to register the icall so it gets an icall wrapper */
5090 info = mono_get_array_new_va_icall (rank);
5092 cfg->flags |= MONO_CFG_HAS_VARARGS;
5094 /* mono_array_new_va () needs a vararg calling convention */
5095 cfg->disable_llvm = TRUE;
5097 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5102 * handle_constrained_gsharedvt_call:
5104 * Handle constrained calls where the receiver is a gsharedvt type.
5105 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * REF_EMIT_WIDEN is an in/out flag; *ref_emit_widen is written back at the
 * end so the caller knows whether the result still needs widening.
 * NOTE(review): some lines (declarations, #else arms, final return) are
 * elided from this listing.
 */
5108 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5109 gboolean *ref_emit_widen)
5111 MonoInst *ins = NULL;
5112 gboolean emit_widen = *ref_emit_widen;
5115 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5116 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5117 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only simple shapes are supported: see the restrictions on the method's
   class, return type and parameter count/kinds tested below. */
5119 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5120 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5121 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5122 MonoInst *args [16];
5125 * This case handles calls to
5126 * - object:ToString()/Equals()/GetHashCode(),
5127 * - System.IComparable<T>:CompareTo()
5128 * - System.IEquatable<T>:Equals ()
5129 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the method, via rgctx when the method itself is generic-shared. */
5133 if (mono_method_check_context_used (cmethod))
5134 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5136 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5137 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5139 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5140 if (fsig->hasthis && fsig->param_count) {
5141 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5142 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5143 ins->dreg = alloc_preg (cfg);
5144 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5145 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus box-type info in args [3]. */
5148 if (mini_is_gsharedvt_type (fsig->params [0])) {
5151 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5153 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5154 addr_reg = ins->dreg;
5155 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5157 EMIT_NEW_ICONST (cfg, args [3], 0);
5158 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: zero the extra slots. */
5161 EMIT_NEW_ICONST (cfg, args [3], 0);
5162 EMIT_NEW_ICONST (cfg, args [4], 0);
5164 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match fsig->ret. */
5167 if (mini_is_gsharedvt_type (fsig->ret)) {
5168 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5169 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the unboxed payload. */
5173 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5174 MONO_ADD_INS (cfg->cbb, add);
5176 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5177 MONO_ADD_INS (cfg->cbb, ins);
5178 /* ins represents the call result */
5181 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5184 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *   If the method uses a GOT variable that has not been allocated yet,
 *   insert an OP_LOAD_GOTADDR at the very start of the entry bblock, and
 *   add a dummy use in the exit bblock so liveness keeps the variable
 *   alive for the whole method.
 */
5193 mono_emit_load_got_addr (MonoCompile *cfg)
5195 MonoInst *getaddr, *dummy_use;
5197 if (!cfg->got_var || cfg->got_var_allocated)
5200 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5201 getaddr->cil_code = cfg->header->code;
5202 getaddr->dreg = cfg->got_var->dreg;
5204 /* Add it to the start of the first bblock */
5205 if (cfg->bb_entry->code) {
5206 getaddr->next = cfg->bb_entry->code;
5207 cfg->bb_entry->code = getaddr;
5210 MONO_ADD_INS (cfg->bb_entry, getaddr);
5212 cfg->got_var_allocated = TRUE;
5215 * Add a dummy use to keep the got_var alive, since real uses might
5216 * only be generated by the back ends.
5217 * Add it to end_bblock, so the variable's lifetime covers the whole
5219 * It would be better to make the usage of the got var explicit in all
5220 * cases when the backend needs it (i.e. calls, throw etc.), so this
5221 * wouldn't be needed.
5223 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5224 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, lazily initialized from the MONO_INLINELIMIT
   environment variable (falling back to INLINE_LENGTH_LIMIT) in
   mono_method_check_inlining (). */
5227 static int inline_limit;
5228 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Checks explicit no-inline flags, inline depth, IL size against the
 *   (env-configurable) inline limit, class-initialization constraints,
 *   soft-float restrictions and the cfg's dont_inline list.
 *   NOTE(review): the return statements are on lines elided from this
 *   listing; comments describe only the visible checks.
 */
5231 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5233 MonoMethodHeaderSummary header;
5235 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5236 MonoMethodSignature *sig = mono_method_signature (method);
5240 if (cfg->disable_inline)
/* Bound recursion/inlining depth. */
5245 if (cfg->inline_depth > 10)
5248 if (!mono_method_get_header_summary (method, &header))
5251 /*runtime, icall and pinvoke are checked by summary call*/
5252 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5253 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5254 (mono_class_is_marshalbyref (method->klass)) ||
5258 /* also consider num_locals? */
5259 /* Do the size check early to avoid creating vtables */
5260 if (!inline_limit_inited) {
5261 if (g_getenv ("MONO_INLINELIMIT"))
5262 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5264 inline_limit = INLINE_LENGTH_LIMIT;
5265 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL-size limit. */
5267 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5271 * if we can initialize the class of the method right away, we do,
5272 * otherwise we don't allow inlining if the class needs initialization,
5273 * since it would mean inserting a call to mono_runtime_class_init()
5274 * inside the inlined code
5276 if (!(cfg->opt & MONO_OPT_SHARED)) {
5277 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5278 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5279 vtable = mono_class_vtable (cfg->domain, method->klass);
5282 if (!cfg->compile_aot)
5283 mono_runtime_class_init (vtable);
5284 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5285 if (cfg->run_cctors && method->klass->has_cctor) {
5286 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5287 if (!method->klass->runtime_info)
5288 /* No vtable created yet */
5290 vtable = mono_class_vtable (cfg->domain, method->klass);
5293 /* This makes so that inline cannot trigger */
5294 /* .cctors: too many apps depend on them */
5295 /* running with a specific order... */
5296 if (! vtable->initialized)
5298 mono_runtime_class_init (vtable);
5300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5301 if (!method->klass->runtime_info)
5302 /* No vtable created yet */
5304 vtable = mono_class_vtable (cfg->domain, method->klass);
5307 if (!vtable->initialized)
5312 * If we're compiling for shared code
5313 * the cctor will need to be run at aot method load time, for example,
5314 * or at the end of the compilation of the inlining method.
5316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5320 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5321 if (mono_arch_is_soft_float ()) {
5323 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5325 for (i = 0; i < sig->param_count; ++i)
5326 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Caller-supplied blacklist. */
5331 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access in METHOD on KLASS requires an
 *   explicit class-initialization (cctor) check to be emitted.
 *   NOTE(review): the return statements are on lines elided from this
 *   listing; comments describe only the visible conditions.
 */
5338 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5340 if (!cfg->compile_aot) {
/* Already initialized at compile time -> no runtime check needed. */
5342 if (vtable->initialized)
5346 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5347 if (cfg->method == method)
5351 if (!mono_class_needs_cctor_run (klass, method))
5354 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5355 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of the one-dimensional
 *   array ARR whose elements are of type KLASS. Emits a bounds check when
 *   BCHECK is set. Returns a STACK_MP instruction for the element address.
 *   NOTE(review): some lines (declarations, #else arms, returns) are
 *   elided from this listing.
 */
5362 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5366 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5369 if (mini_is_gsharedvt_variable_klass (klass)) {
5372 mono_class_init (klass);
5373 size = mono_class_array_element_size (klass);
5376 mult_reg = alloc_preg (cfg);
5377 array_reg = arr->dreg;
5378 index_reg = index->dreg;
5380 #if SIZEOF_REGISTER == 8
5381 /* The array reg is 64 bits but the index reg is only 32 */
5382 if (COMPILE_LLVM (cfg)) {
5384 index2_reg = index_reg;
5386 index2_reg = alloc_preg (cfg);
5387 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
5390 if (index->type == STACK_I8) {
5391 index2_reg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5394 index2_reg = index_reg;
5399 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scale and offset into a single LEA. */
5401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5402 if (size == 1 || size == 2 || size == 4 || size == 8) {
5403 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5405 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5406 ins->klass = mono_class_get_element_class (klass);
5407 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof(vector). */
5413 add_reg = alloc_ireg_mp (cfg);
5416 MonoInst *rgctx_ins;
/* Gsharedvt: the element size is only known at runtime, fetch it via rgctx. */
5419 g_assert (cfg->gshared);
5420 context_used = mini_class_check_context_used (cfg, klass);
5421 g_assert (context_used);
5422 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5423 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5427 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5428 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5429 ins->klass = mono_class_get_element_class (klass);
5430 ins->type = STACK_MP;
5431 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 *   array ARR with element type KLASS, including range checks against the
 *   per-dimension lower bound and length stored in the bounds array.
 *   Returns a STACK_MP instruction for the element address.
 */
5437 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5439 int bounds_reg = alloc_preg (cfg);
5440 int add_reg = alloc_ireg_mp (cfg);
5441 int mult_reg = alloc_preg (cfg);
5442 int mult2_reg = alloc_preg (cfg);
5443 int low1_reg = alloc_preg (cfg);
5444 int low2_reg = alloc_preg (cfg);
5445 int high1_reg = alloc_preg (cfg);
5446 int high2_reg = alloc_preg (cfg);
5447 int realidx1_reg = alloc_preg (cfg);
5448 int realidx2_reg = alloc_preg (cfg);
5449 int sum_reg = alloc_preg (cfg);
5450 int index1, index2, tmpreg;
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5457 index1 = index_ins1->dreg;
5458 index2 = index_ins2->dreg;
5460 #if SIZEOF_REGISTER == 8
5461 /* The array reg is 64 bits but the index reg is only 32 */
5462 if (COMPILE_LLVM (cfg)) {
5465 tmpreg = alloc_preg (cfg);
5466 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5468 tmpreg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5473 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5477 /* range checking */
5478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5479 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; require realidx1 < length. */
5481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5482 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5483 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5485 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5486 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5487 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds entry. */
5489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5490 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5491 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5493 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5495 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof(vector) */
5497 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5498 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5501 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5503 ins->type = STACK_MP;
5505 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch helper for Get/Set/Address on multi-dimensional arrays:
 *   use the inline rank-1/rank-2 address computations when possible,
 *   otherwise call the marshal-generated Address wrapper.
 */
5511 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5515 MonoMethod *addr_method;
5517 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the last parameter is the value, not an index. */
5519 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5522 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5524 /* emit_ldelema_2 depends on OP_LMUL */
5525 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5526 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5529 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the generated array Address helper. */
5532 element_size = mono_class_array_element_size (eclass);
5533 addr_method = mono_marshal_get_array_address (rank, element_size);
5534 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
5539 static MonoBreakPolicy
5540 always_insert_breakpoint (MonoMethod *method)
5542 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5545 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5548 * mono_set_break_policy:
5549 * policy_callback: the new callback function
5551 * Allow embedders to decide whether to actually obey breakpoint instructions
5552 * (both break IL instructions and Debugger.Break () method calls), for example
5553 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5554 * untrusted or semi-trusted code.
5556 * @policy_callback will be called every time a break point instruction needs to
5557 * be inserted with the method argument being the method that calls Debugger.Break()
5558 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5559 * if it wants the breakpoint to not be effective in the given method.
5560 * #MONO_BREAK_POLICY_ALWAYS is the default.
5563 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default (always insert). */
5565 if (policy_callback)
5566 break_policy_func = policy_callback;
5568 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Consult the registered break policy to decide whether a breakpoint in
 *   METHOD should actually be emitted.
 *   (The misspelled name is kept as-is: callers elsewhere use it.)
 */
5572 should_insert_brekpoint (MonoMethod *method) {
5573 switch (break_policy_func (method)) {
5574 case MONO_BREAK_POLICY_ALWAYS:
5576 case MONO_BREAK_POLICY_NEVER:
5578 case MONO_BREAK_POLICY_ON_DBG:
5579 g_warning ("mdb no longer supported");
5582 g_warning ("Incorrect value returned from break policy callback");
5587 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   args [0] = array, args [1] = index, args [2] = value location.
 *   IS_SET selects store-into-array vs load-out-of-array; both paths go
 *   through the element address computed by mini_emit_ldelema_1_ins ().
 */
5589 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5591 MonoInst *addr, *store, *load;
5592 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5594 /* the bounds check is already done by the callers */
5595 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the array slot; barrier for reference elements. */
5597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5598 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5599 if (mini_type_is_reference (fsig->params [2]))
5600 emit_write_barrier (cfg, addr, load);
/* Get: copy the array slot into *args[2]. */
5602 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5603 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5610 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5612 return mini_type_is_reference (&klass->byval_arg);
5616 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5618 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5619 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5620 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5621 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5622 MonoInst *iargs [3];
5625 mono_class_setup_vtable (obj_array);
5626 g_assert (helper->slot);
5628 if (sp [0]->type != STACK_OBJ)
5630 if (sp [2]->type != STACK_OBJ)
5637 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5641 if (mini_is_gsharedvt_variable_klass (klass)) {
5644 // FIXME-VT: OP_ICONST optimization
5645 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5646 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5647 ins->opcode = OP_STOREV_MEMBASE;
5648 } else if (sp [1]->opcode == OP_ICONST) {
5649 int array_reg = sp [0]->dreg;
5650 int index_reg = sp [1]->dreg;
5651 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5654 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5655 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5657 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5659 if (generic_class_is_reference_type (cfg, klass))
5660 emit_write_barrier (cfg, addr, sp [2]);
5667 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5672 eklass = mono_class_from_mono_type (fsig->params [2]);
5674 eklass = mono_class_from_mono_type (fsig->ret);
5677 return emit_array_store (cfg, eklass, args, FALSE);
5679 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5686 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5689 int param_size, return_size;
5691 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5692 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5694 if (cfg->verbose_level > 3)
5695 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5697 //Only allow for valuetypes
5698 if (!param_klass->valuetype || !return_klass->valuetype) {
5699 if (cfg->verbose_level > 3)
5700 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is not a valuetype\n");
5705 if (param_klass->has_references || return_klass->has_references)
5708 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5709 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5710 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5711 if (cfg->verbose_level > 3)
5712 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5716 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5717 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5718 if (cfg->verbose_level > 3)
5719 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5723 param_size = mono_class_value_size (param_klass, &align);
5724 return_size = mono_class_value_size (return_klass, &align);
5726 //We can do it if sizes match
5727 if (param_size == return_size) {
5728 if (cfg->verbose_level > 3)
5729 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5733 //No simple way to handle struct if sizes don't match
5734 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5735 if (cfg->verbose_level > 3)
5736 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5741 * Same reg size category.
5742 * A quick note on why we don't require widening here.
5743 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5745 * Since the source value comes from a function argument, the JIT will already have
5746 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5748 if (param_size <= 4 && return_size <= 4) {
5749 if (cfg->verbose_level > 3)
5750 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5758 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5760 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5761 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5763 //Valuetypes that are semantically equivalent or numbers than can be widened to
5764 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5767 //Arrays of valuetypes that are semantically equivalent
5768 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5775 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5777 #ifdef MONO_ARCH_SIMD_INTRINSICS
5778 MonoInst *ins = NULL;
5780 if (cfg->opt & MONO_OPT_SIMD) {
5781 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5787 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5791 emit_memory_barrier (MonoCompile *cfg, int kind)
5793 MonoInst *ins = NULL;
5794 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5795 MONO_ADD_INS (cfg->cbb, ins);
5796 ins->backend.memory_barrier_kind = kind;
5802 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5804 MonoInst *ins = NULL;
5807 /* The LLVM backend supports these intrinsics */
5808 if (cmethod->klass == mono_defaults.math_class) {
5809 if (strcmp (cmethod->name, "Sin") == 0) {
5811 } else if (strcmp (cmethod->name, "Cos") == 0) {
5813 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5815 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5819 if (opcode && fsig->param_count == 1) {
5820 MONO_INST_NEW (cfg, ins, opcode);
5821 ins->type = STACK_R8;
5822 ins->dreg = mono_alloc_freg (cfg);
5823 ins->sreg1 = args [0]->dreg;
5824 MONO_ADD_INS (cfg->cbb, ins);
5828 if (cfg->opt & MONO_OPT_CMOV) {
5829 if (strcmp (cmethod->name, "Min") == 0) {
5830 if (fsig->params [0]->type == MONO_TYPE_I4)
5832 if (fsig->params [0]->type == MONO_TYPE_U4)
5833 opcode = OP_IMIN_UN;
5834 else if (fsig->params [0]->type == MONO_TYPE_I8)
5836 else if (fsig->params [0]->type == MONO_TYPE_U8)
5837 opcode = OP_LMIN_UN;
5838 } else if (strcmp (cmethod->name, "Max") == 0) {
5839 if (fsig->params [0]->type == MONO_TYPE_I4)
5841 if (fsig->params [0]->type == MONO_TYPE_U4)
5842 opcode = OP_IMAX_UN;
5843 else if (fsig->params [0]->type == MONO_TYPE_I8)
5845 else if (fsig->params [0]->type == MONO_TYPE_U8)
5846 opcode = OP_LMAX_UN;
5850 if (opcode && fsig->param_count == 2) {
5851 MONO_INST_NEW (cfg, ins, opcode);
5852 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5853 ins->dreg = mono_alloc_ireg (cfg);
5854 ins->sreg1 = args [0]->dreg;
5855 ins->sreg2 = args [1]->dreg;
5856 MONO_ADD_INS (cfg->cbb, ins);
5864 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5866 if (cmethod->klass == mono_defaults.array_class) {
5867 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5868 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5869 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5870 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5871 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5872 return emit_array_unsafe_mov (cfg, fsig, args);
5879 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5881 MonoInst *ins = NULL;
5883 static MonoClass *runtime_helpers_class = NULL;
5884 if (! runtime_helpers_class)
5885 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5886 "System.Runtime.CompilerServices", "RuntimeHelpers");
5888 if (cmethod->klass == mono_defaults.string_class) {
5889 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5890 int dreg = alloc_ireg (cfg);
5891 int index_reg = alloc_preg (cfg);
5892 int add_reg = alloc_preg (cfg);
5894 #if SIZEOF_REGISTER == 8
5895 /* The array reg is 64 bits but the index reg is only 32 */
5896 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5898 index_reg = args [1]->dreg;
5900 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5902 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5903 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5904 add_reg = ins->dreg;
5905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5908 int mult_reg = alloc_preg (cfg);
5909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5910 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5912 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5914 type_from_op (cfg, ins, NULL, NULL);
5916 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5917 int dreg = alloc_ireg (cfg);
5918 /* Decompose later to allow more optimizations */
5919 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5920 ins->type = STACK_I4;
5921 ins->flags |= MONO_INST_FAULT;
5922 cfg->cbb->has_array_access = TRUE;
5923 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5928 } else if (cmethod->klass == mono_defaults.object_class) {
5930 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5931 int dreg = alloc_ireg_ref (cfg);
5932 int vt_reg = alloc_preg (cfg);
5933 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5934 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5935 type_from_op (cfg, ins, NULL, NULL);
5938 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5939 int dreg = alloc_ireg (cfg);
5940 int t1 = alloc_ireg (cfg);
5942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5943 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5944 ins->type = STACK_I4;
5947 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5948 MONO_INST_NEW (cfg, ins, OP_NOP);
5949 MONO_ADD_INS (cfg->cbb, ins);
5953 } else if (cmethod->klass == mono_defaults.array_class) {
5954 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5955 return emit_array_generic_access (cfg, fsig, args, FALSE);
5956 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5957 return emit_array_generic_access (cfg, fsig, args, TRUE);
5959 #ifndef MONO_BIG_ARRAYS
5961 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5964 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5965 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5966 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5967 int dreg = alloc_ireg (cfg);
5968 int bounds_reg = alloc_ireg_mp (cfg);
5969 MonoBasicBlock *end_bb, *szarray_bb;
5970 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5972 NEW_BBLOCK (cfg, end_bb);
5973 NEW_BBLOCK (cfg, szarray_bb);
5975 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5976 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5978 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5979 /* Non-szarray case */
5981 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5982 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5985 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5987 MONO_START_BB (cfg, szarray_bb);
5990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5991 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5993 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5994 MONO_START_BB (cfg, end_bb);
5996 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5997 ins->type = STACK_I4;
6003 if (cmethod->name [0] != 'g')
6006 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6007 int dreg = alloc_ireg (cfg);
6008 int vtable_reg = alloc_preg (cfg);
6009 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6010 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6011 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6012 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6013 type_from_op (cfg, ins, NULL, NULL);
6016 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6017 int dreg = alloc_ireg (cfg);
6019 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6020 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6021 type_from_op (cfg, ins, NULL, NULL);
6026 } else if (cmethod->klass == runtime_helpers_class) {
6028 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6029 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6033 } else if (cmethod->klass == mono_defaults.thread_class) {
6034 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6035 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6036 MONO_ADD_INS (cfg->cbb, ins);
6038 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6039 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6040 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6042 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6044 if (fsig->params [0]->type == MONO_TYPE_I1)
6045 opcode = OP_LOADI1_MEMBASE;
6046 else if (fsig->params [0]->type == MONO_TYPE_U1)
6047 opcode = OP_LOADU1_MEMBASE;
6048 else if (fsig->params [0]->type == MONO_TYPE_I2)
6049 opcode = OP_LOADI2_MEMBASE;
6050 else if (fsig->params [0]->type == MONO_TYPE_U2)
6051 opcode = OP_LOADU2_MEMBASE;
6052 else if (fsig->params [0]->type == MONO_TYPE_I4)
6053 opcode = OP_LOADI4_MEMBASE;
6054 else if (fsig->params [0]->type == MONO_TYPE_U4)
6055 opcode = OP_LOADU4_MEMBASE;
6056 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6057 opcode = OP_LOADI8_MEMBASE;
6058 else if (fsig->params [0]->type == MONO_TYPE_R4)
6059 opcode = OP_LOADR4_MEMBASE;
6060 else if (fsig->params [0]->type == MONO_TYPE_R8)
6061 opcode = OP_LOADR8_MEMBASE;
6062 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6063 opcode = OP_LOAD_MEMBASE;
6066 MONO_INST_NEW (cfg, ins, opcode);
6067 ins->inst_basereg = args [0]->dreg;
6068 ins->inst_offset = 0;
6069 MONO_ADD_INS (cfg->cbb, ins);
6071 switch (fsig->params [0]->type) {
6078 ins->dreg = mono_alloc_ireg (cfg);
6079 ins->type = STACK_I4;
6083 ins->dreg = mono_alloc_lreg (cfg);
6084 ins->type = STACK_I8;
6088 ins->dreg = mono_alloc_ireg (cfg);
6089 #if SIZEOF_REGISTER == 8
6090 ins->type = STACK_I8;
6092 ins->type = STACK_I4;
6097 ins->dreg = mono_alloc_freg (cfg);
6098 ins->type = STACK_R8;
6101 g_assert (mini_type_is_reference (fsig->params [0]));
6102 ins->dreg = mono_alloc_ireg_ref (cfg);
6103 ins->type = STACK_OBJ;
6107 if (opcode == OP_LOADI8_MEMBASE)
6108 ins = mono_decompose_opcode (cfg, ins);
6110 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6114 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6116 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6118 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6119 opcode = OP_STOREI1_MEMBASE_REG;
6120 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6121 opcode = OP_STOREI2_MEMBASE_REG;
6122 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6123 opcode = OP_STOREI4_MEMBASE_REG;
6124 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6125 opcode = OP_STOREI8_MEMBASE_REG;
6126 else if (fsig->params [0]->type == MONO_TYPE_R4)
6127 opcode = OP_STORER4_MEMBASE_REG;
6128 else if (fsig->params [0]->type == MONO_TYPE_R8)
6129 opcode = OP_STORER8_MEMBASE_REG;
6130 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6131 opcode = OP_STORE_MEMBASE_REG;
6134 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6136 MONO_INST_NEW (cfg, ins, opcode);
6137 ins->sreg1 = args [1]->dreg;
6138 ins->inst_destbasereg = args [0]->dreg;
6139 ins->inst_offset = 0;
6140 MONO_ADD_INS (cfg->cbb, ins);
6142 if (opcode == OP_STOREI8_MEMBASE_REG)
6143 ins = mono_decompose_opcode (cfg, ins);
6148 } else if (cmethod->klass->image == mono_defaults.corlib &&
6149 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6150 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6153 #if SIZEOF_REGISTER == 8
6154 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6155 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6156 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6157 ins->dreg = mono_alloc_preg (cfg);
6158 ins->sreg1 = args [0]->dreg;
6159 ins->type = STACK_I8;
6160 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6161 MONO_ADD_INS (cfg->cbb, ins);
6165 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6167 /* 64 bit reads are already atomic */
6168 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6169 load_ins->dreg = mono_alloc_preg (cfg);
6170 load_ins->inst_basereg = args [0]->dreg;
6171 load_ins->inst_offset = 0;
6172 load_ins->type = STACK_I8;
6173 MONO_ADD_INS (cfg->cbb, load_ins);
6175 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6182 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6183 MonoInst *ins_iconst;
6186 if (fsig->params [0]->type == MONO_TYPE_I4) {
6187 opcode = OP_ATOMIC_ADD_I4;
6188 cfg->has_atomic_add_i4 = TRUE;
6190 #if SIZEOF_REGISTER == 8
6191 else if (fsig->params [0]->type == MONO_TYPE_I8)
6192 opcode = OP_ATOMIC_ADD_I8;
6195 if (!mono_arch_opcode_supported (opcode))
6197 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6198 ins_iconst->inst_c0 = 1;
6199 ins_iconst->dreg = mono_alloc_ireg (cfg);
6200 MONO_ADD_INS (cfg->cbb, ins_iconst);
6202 MONO_INST_NEW (cfg, ins, opcode);
6203 ins->dreg = mono_alloc_ireg (cfg);
6204 ins->inst_basereg = args [0]->dreg;
6205 ins->inst_offset = 0;
6206 ins->sreg2 = ins_iconst->dreg;
6207 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6208 MONO_ADD_INS (cfg->cbb, ins);
6210 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6211 MonoInst *ins_iconst;
6214 if (fsig->params [0]->type == MONO_TYPE_I4) {
6215 opcode = OP_ATOMIC_ADD_I4;
6216 cfg->has_atomic_add_i4 = TRUE;
6218 #if SIZEOF_REGISTER == 8
6219 else if (fsig->params [0]->type == MONO_TYPE_I8)
6220 opcode = OP_ATOMIC_ADD_I8;
6223 if (!mono_arch_opcode_supported (opcode))
6225 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6226 ins_iconst->inst_c0 = -1;
6227 ins_iconst->dreg = mono_alloc_ireg (cfg);
6228 MONO_ADD_INS (cfg->cbb, ins_iconst);
6230 MONO_INST_NEW (cfg, ins, opcode);
6231 ins->dreg = mono_alloc_ireg (cfg);
6232 ins->inst_basereg = args [0]->dreg;
6233 ins->inst_offset = 0;
6234 ins->sreg2 = ins_iconst->dreg;
6235 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6236 MONO_ADD_INS (cfg->cbb, ins);
6238 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6241 if (fsig->params [0]->type == MONO_TYPE_I4) {
6242 opcode = OP_ATOMIC_ADD_I4;
6243 cfg->has_atomic_add_i4 = TRUE;
6245 #if SIZEOF_REGISTER == 8
6246 else if (fsig->params [0]->type == MONO_TYPE_I8)
6247 opcode = OP_ATOMIC_ADD_I8;
6250 if (!mono_arch_opcode_supported (opcode))
6252 MONO_INST_NEW (cfg, ins, opcode);
6253 ins->dreg = mono_alloc_ireg (cfg);
6254 ins->inst_basereg = args [0]->dreg;
6255 ins->inst_offset = 0;
6256 ins->sreg2 = args [1]->dreg;
6257 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6258 MONO_ADD_INS (cfg->cbb, ins);
6261 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6262 MonoInst *f2i = NULL, *i2f;
6263 guint32 opcode, f2i_opcode, i2f_opcode;
6264 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6265 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6267 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6268 fsig->params [0]->type == MONO_TYPE_R4) {
6269 opcode = OP_ATOMIC_EXCHANGE_I4;
6270 f2i_opcode = OP_MOVE_F_TO_I4;
6271 i2f_opcode = OP_MOVE_I4_TO_F;
6272 cfg->has_atomic_exchange_i4 = TRUE;
6274 #if SIZEOF_REGISTER == 8
6276 fsig->params [0]->type == MONO_TYPE_I8 ||
6277 fsig->params [0]->type == MONO_TYPE_R8 ||
6278 fsig->params [0]->type == MONO_TYPE_I) {
6279 opcode = OP_ATOMIC_EXCHANGE_I8;
6280 f2i_opcode = OP_MOVE_F_TO_I8;
6281 i2f_opcode = OP_MOVE_I8_TO_F;
6284 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6285 opcode = OP_ATOMIC_EXCHANGE_I4;
6286 cfg->has_atomic_exchange_i4 = TRUE;
6292 if (!mono_arch_opcode_supported (opcode))
6296 /* TODO: Decompose these opcodes instead of bailing here. */
6297 if (COMPILE_SOFT_FLOAT (cfg))
6300 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6301 f2i->dreg = mono_alloc_ireg (cfg);
6302 f2i->sreg1 = args [1]->dreg;
6303 if (f2i_opcode == OP_MOVE_F_TO_I4)
6304 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6305 MONO_ADD_INS (cfg->cbb, f2i);
6308 MONO_INST_NEW (cfg, ins, opcode);
6309 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6310 ins->inst_basereg = args [0]->dreg;
6311 ins->inst_offset = 0;
6312 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6313 MONO_ADD_INS (cfg->cbb, ins);
6315 switch (fsig->params [0]->type) {
6317 ins->type = STACK_I4;
6320 ins->type = STACK_I8;
6323 #if SIZEOF_REGISTER == 8
6324 ins->type = STACK_I8;
6326 ins->type = STACK_I4;
6331 ins->type = STACK_R8;
6334 g_assert (mini_type_is_reference (fsig->params [0]));
6335 ins->type = STACK_OBJ;
6340 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6341 i2f->dreg = mono_alloc_freg (cfg);
6342 i2f->sreg1 = ins->dreg;
6343 i2f->type = STACK_R8;
6344 if (i2f_opcode == OP_MOVE_I4_TO_F)
6345 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6346 MONO_ADD_INS (cfg->cbb, i2f);
6351 if (cfg->gen_write_barriers && is_ref)
6352 emit_write_barrier (cfg, args [0], args [1]);
6354 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6355 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6356 guint32 opcode, f2i_opcode, i2f_opcode;
6357 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6358 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6360 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6361 fsig->params [1]->type == MONO_TYPE_R4) {
6362 opcode = OP_ATOMIC_CAS_I4;
6363 f2i_opcode = OP_MOVE_F_TO_I4;
6364 i2f_opcode = OP_MOVE_I4_TO_F;
6365 cfg->has_atomic_cas_i4 = TRUE;
6367 #if SIZEOF_REGISTER == 8
6369 fsig->params [1]->type == MONO_TYPE_I8 ||
6370 fsig->params [1]->type == MONO_TYPE_R8 ||
6371 fsig->params [1]->type == MONO_TYPE_I) {
6372 opcode = OP_ATOMIC_CAS_I8;
6373 f2i_opcode = OP_MOVE_F_TO_I8;
6374 i2f_opcode = OP_MOVE_I8_TO_F;
6377 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6378 opcode = OP_ATOMIC_CAS_I4;
6379 cfg->has_atomic_cas_i4 = TRUE;
6385 if (!mono_arch_opcode_supported (opcode))
6389 /* TODO: Decompose these opcodes instead of bailing here. */
6390 if (COMPILE_SOFT_FLOAT (cfg))
6393 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6394 f2i_new->dreg = mono_alloc_ireg (cfg);
6395 f2i_new->sreg1 = args [1]->dreg;
6396 if (f2i_opcode == OP_MOVE_F_TO_I4)
6397 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6398 MONO_ADD_INS (cfg->cbb, f2i_new);
6400 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6401 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6402 f2i_cmp->sreg1 = args [2]->dreg;
6403 if (f2i_opcode == OP_MOVE_F_TO_I4)
6404 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6405 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6408 MONO_INST_NEW (cfg, ins, opcode);
6409 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6410 ins->sreg1 = args [0]->dreg;
6411 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6412 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6413 MONO_ADD_INS (cfg->cbb, ins);
6415 switch (fsig->params [1]->type) {
6417 ins->type = STACK_I4;
6420 ins->type = STACK_I8;
6423 #if SIZEOF_REGISTER == 8
6424 ins->type = STACK_I8;
6426 ins->type = STACK_I4;
6430 ins->type = cfg->r4_stack_type;
6433 ins->type = STACK_R8;
6436 g_assert (mini_type_is_reference (fsig->params [1]));
6437 ins->type = STACK_OBJ;
6442 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6443 i2f->dreg = mono_alloc_freg (cfg);
6444 i2f->sreg1 = ins->dreg;
6445 i2f->type = STACK_R8;
6446 if (i2f_opcode == OP_MOVE_I4_TO_F)
6447 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6448 MONO_ADD_INS (cfg->cbb, i2f);
6453 if (cfg->gen_write_barriers && is_ref)
6454 emit_write_barrier (cfg, args [0], args [1]);
6456 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6457 fsig->params [1]->type == MONO_TYPE_I4) {
6458 MonoInst *cmp, *ceq;
6460 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6463 /* int32 r = CAS (location, value, comparand); */
6464 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6465 ins->dreg = alloc_ireg (cfg);
6466 ins->sreg1 = args [0]->dreg;
6467 ins->sreg2 = args [1]->dreg;
6468 ins->sreg3 = args [2]->dreg;
6469 ins->type = STACK_I4;
6470 MONO_ADD_INS (cfg->cbb, ins);
6472 /* bool result = r == comparand; */
6473 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6474 cmp->sreg1 = ins->dreg;
6475 cmp->sreg2 = args [2]->dreg;
6476 cmp->type = STACK_I4;
6477 MONO_ADD_INS (cfg->cbb, cmp);
6479 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6480 ceq->dreg = alloc_ireg (cfg);
6481 ceq->type = STACK_I4;
6482 MONO_ADD_INS (cfg->cbb, ceq);
6484 /* *success = result; */
6485 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6487 cfg->has_atomic_cas_i4 = TRUE;
6489 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6490 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6494 } else if (cmethod->klass->image == mono_defaults.corlib &&
6495 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6496 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6499 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6501 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6502 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6504 if (fsig->params [0]->type == MONO_TYPE_I1)
6505 opcode = OP_ATOMIC_LOAD_I1;
6506 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6507 opcode = OP_ATOMIC_LOAD_U1;
6508 else if (fsig->params [0]->type == MONO_TYPE_I2)
6509 opcode = OP_ATOMIC_LOAD_I2;
6510 else if (fsig->params [0]->type == MONO_TYPE_U2)
6511 opcode = OP_ATOMIC_LOAD_U2;
6512 else if (fsig->params [0]->type == MONO_TYPE_I4)
6513 opcode = OP_ATOMIC_LOAD_I4;
6514 else if (fsig->params [0]->type == MONO_TYPE_U4)
6515 opcode = OP_ATOMIC_LOAD_U4;
6516 else if (fsig->params [0]->type == MONO_TYPE_R4)
6517 opcode = OP_ATOMIC_LOAD_R4;
6518 else if (fsig->params [0]->type == MONO_TYPE_R8)
6519 opcode = OP_ATOMIC_LOAD_R8;
6520 #if SIZEOF_REGISTER == 8
6521 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6522 opcode = OP_ATOMIC_LOAD_I8;
6523 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6524 opcode = OP_ATOMIC_LOAD_U8;
6526 else if (fsig->params [0]->type == MONO_TYPE_I)
6527 opcode = OP_ATOMIC_LOAD_I4;
6528 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6529 opcode = OP_ATOMIC_LOAD_U4;
6533 if (!mono_arch_opcode_supported (opcode))
6536 MONO_INST_NEW (cfg, ins, opcode);
6537 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6538 ins->sreg1 = args [0]->dreg;
6539 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6540 MONO_ADD_INS (cfg->cbb, ins);
6542 switch (fsig->params [0]->type) {
6543 case MONO_TYPE_BOOLEAN:
6550 ins->type = STACK_I4;
6554 ins->type = STACK_I8;
6558 #if SIZEOF_REGISTER == 8
6559 ins->type = STACK_I8;
6561 ins->type = STACK_I4;
6565 ins->type = cfg->r4_stack_type;
6568 ins->type = STACK_R8;
6571 g_assert (mini_type_is_reference (fsig->params [0]));
6572 ins->type = STACK_OBJ;
6578 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6580 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6582 if (fsig->params [0]->type == MONO_TYPE_I1)
6583 opcode = OP_ATOMIC_STORE_I1;
6584 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6585 opcode = OP_ATOMIC_STORE_U1;
6586 else if (fsig->params [0]->type == MONO_TYPE_I2)
6587 opcode = OP_ATOMIC_STORE_I2;
6588 else if (fsig->params [0]->type == MONO_TYPE_U2)
6589 opcode = OP_ATOMIC_STORE_U2;
6590 else if (fsig->params [0]->type == MONO_TYPE_I4)
6591 opcode = OP_ATOMIC_STORE_I4;
6592 else if (fsig->params [0]->type == MONO_TYPE_U4)
6593 opcode = OP_ATOMIC_STORE_U4;
6594 else if (fsig->params [0]->type == MONO_TYPE_R4)
6595 opcode = OP_ATOMIC_STORE_R4;
6596 else if (fsig->params [0]->type == MONO_TYPE_R8)
6597 opcode = OP_ATOMIC_STORE_R8;
6598 #if SIZEOF_REGISTER == 8
6599 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6600 opcode = OP_ATOMIC_STORE_I8;
6601 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6602 opcode = OP_ATOMIC_STORE_U8;
6604 else if (fsig->params [0]->type == MONO_TYPE_I)
6605 opcode = OP_ATOMIC_STORE_I4;
6606 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6607 opcode = OP_ATOMIC_STORE_U4;
6611 if (!mono_arch_opcode_supported (opcode))
6614 MONO_INST_NEW (cfg, ins, opcode);
6615 ins->dreg = args [0]->dreg;
6616 ins->sreg1 = args [1]->dreg;
6617 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6618 MONO_ADD_INS (cfg->cbb, ins);
6620 if (cfg->gen_write_barriers && is_ref)
6621 emit_write_barrier (cfg, args [0], args [1]);
6627 } else if (cmethod->klass->image == mono_defaults.corlib &&
6628 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6629 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6630 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6631 if (should_insert_brekpoint (cfg->method)) {
6632 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6634 MONO_INST_NEW (cfg, ins, OP_NOP);
6635 MONO_ADD_INS (cfg->cbb, ins);
6639 } else if (cmethod->klass->image == mono_defaults.corlib &&
6640 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6641 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6642 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6644 EMIT_NEW_ICONST (cfg, ins, 1);
6646 EMIT_NEW_ICONST (cfg, ins, 0);
6649 } else if (cmethod->klass->image == mono_defaults.corlib &&
6650 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6651 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6652 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6653 /* No stack walks are currently available, so implement this as an intrinsic */
6654 MonoInst *assembly_ins;
6656 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6657 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6660 } else if (cmethod->klass == mono_defaults.math_class) {
6662 * There is general branchless code for Min/Max, but it does not work for
6664 * http://everything2.com/?node_id=1051618
6666 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6667 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6668 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6669 !strcmp (cmethod->klass->name, "Selector")) ||
6670 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6671 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6672 !strcmp (cmethod->klass->name, "Selector"))
6674 if (cfg->backend->have_objc_get_selector &&
6675 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6676 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6679 MonoJumpInfoToken *ji;
6682 cfg->disable_llvm = TRUE;
6684 if (args [0]->opcode == OP_GOT_ENTRY) {
6685 pi = args [0]->inst_p1;
6686 g_assert (pi->opcode == OP_PATCH_INFO);
6687 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6690 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6691 ji = args [0]->inst_p0;
6694 NULLIFY_INS (args [0]);
6697 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6698 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6699 ins->dreg = mono_alloc_ireg (cfg);
6701 ins->inst_p0 = mono_string_to_utf8 (s);
6702 MONO_ADD_INS (cfg->cbb, ins);
6707 #ifdef MONO_ARCH_SIMD_INTRINSICS
6708 if (cfg->opt & MONO_OPT_SIMD) {
6709 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6715 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6719 if (COMPILE_LLVM (cfg)) {
6720 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6725 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6729 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain runtime-internal calls to more efficient implementations.
 * Currently handles System.String::InternalAllocateStr, which is rerouted to the
 * GC's managed allocator when profiling of allocations is off and the code is not
 * compiled with MONO_OPT_SHARED. Returns the emitted call instruction, or
 * (presumably) NULL when no redirection applies — TODO confirm, the tail of the
 * function is not visible here.
 */
6732 inline static MonoInst*
6733 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6734 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6736 if (method->klass == mono_defaults.string_class) {
6737 /* managed string allocation support */
6738 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6739 MonoInst *iargs [2];
6740 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6741 MonoMethod *managed_alloc = NULL;
6743 g_assert (vtable); /* Should not fail since it is System.String */
/* The managed allocator is unavailable when cross-compiling. */
6744 #ifndef MONO_CROSS_COMPILE
6745 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
6749 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6750 iargs [1] = args [0];
6751 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Save the values on the stack SP into newly created local variables which
 * become the argument variables (cfg->args) of the method being inlined.
 * SIG describes the callee; when SIG has an implicit 'this', slot 0 takes its
 * type from the stack entry instead of the signature.
 */
6758 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6760 MonoInst *store, *temp;
6763 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the 'this' slot the static signature has no entry, so derive the type from the stack. */
6764 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6767 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6768 * would be different than the MonoInst's used to represent arguments, and
6769 * the ldelema implementation can't deal with that.
6770 * Solution: When ldelema is used on an inline argument, create a var for
6771 * it, emit ldelema on that var, and emit the saving code below in
6772 * inline_method () if needed.
6774 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6775 cfg->args [i] = temp;
6776 /* This uses cfg->args [i] which is set by the preceding line */
6777 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6778 store->cil_code = sp [0]->cil_code;
6783 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6784 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6786 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The looked-up value is cached in a function-local static; an empty
 * value (presumably) disables the filter — TODO confirm, the else branch is not
 * visible here.
 */
6788 check_inline_called_method_name_limit (MonoMethod *called_method)
6791 static const char *limit = NULL;
6793 if (limit == NULL) {
6794 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6796 if (limit_string != NULL)
6797 limit = limit_string;
6802 if (limit [0] != '\0') {
6803 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: the callee name must begin with the configured limit string. */
6805 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6806 g_free (called_method_name);
6808 //return (strncmp_result <= 0);
6809 return (strncmp_result == 0);
6816 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining when the CALLER's full name starts with
 * the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable. Mirrors check_inline_called_method_name_limit () for the caller side.
 */
6818 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6821 static const char *limit = NULL;
6823 if (limit == NULL) {
6824 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6825 if (limit_string != NULL) {
6826 limit = limit_string;
6832 if (limit [0] != '\0') {
6833 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit string. */
6835 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6836 g_free (caller_method_name);
6838 //return (strncmp_result <= 0);
6839 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes the register DREG to the zero/default value of
 * RTYPE: NULL for references/pointers, 0 for integers, 0.0 for floats, and a
 * VZERO for value types. R4 gets a true single-precision constant only when
 * cfg->r4fp is set; otherwise it shares the R8 path.
 */
6847 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* The zero constants are static so the emitted const instructions can point at them. */
6849 static double r8_0 = 0.0;
6850 static float r4_0 = 0.0;
6854 rtype = mini_get_underlying_type (rtype);
6858 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6859 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6860 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6861 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6862 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6863 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6864 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6865 ins->type = STACK_R4;
6866 ins->inst_p0 = (void*)&r4_0;
6868 MONO_ADD_INS (cfg->cbb, ins);
6869 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6870 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6871 ins->type = STACK_R8;
6872 ins->inst_p0 = (void*)&r8_0;
6874 MONO_ADD_INS (cfg->cbb, ins);
6875 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6876 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6877 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6878 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variables known to be value types are also zeroed as VTs. */
6879 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat as a pointer-sized reference and store NULL. */
6881 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder instructions which
 * keep the IR/SSA valid without generating real initialization code. Falls back
 * to a real emit_init_rvar () for types with no dummy opcode.
 */
6886 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6890 rtype = mini_get_underlying_type (rtype);
6894 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6895 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6896 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6897 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6898 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6899 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6900 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6901 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6902 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6903 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6904 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6905 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6906 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6907 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
6909 emit_init_rvar (cfg, dreg, rtype);
6913 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. If INIT is FALSE, emit dummy
 * initialization statements instead, just to keep the IR valid. Soft-float
 * compilation initializes through a temporary register plus an explicit local
 * store rather than writing the local's dreg directly.
 */
6915 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6917 MonoInst *var = cfg->locals [local];
6918 if (COMPILE_SOFT_FLOAT (cfg)) {
6920 int reg = alloc_dreg (cfg, var->type);
6921 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6922 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6925 emit_init_rvar (cfg, var->dreg, type);
6927 emit_dummy_init_rvar (cfg, var->dreg, type);
6934 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compilation at IP, translating it to IR
 * with a recursive mono_method_to_ir () call between freshly allocated start
 * (sbblock) and end (ebblock) basic blocks. All per-method compile state
 * (locals, args, cil offsets, current bblock, generic context, ...) is saved
 * before and restored after the recursive call, so the caller's state is intact
 * whether the inline succeeds or is aborted. Returns the cost of inlining, and
 * loads the return value (if any) onto the stack via rvar.
 */
6937 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6938 guchar *ip, guint real_offset, gboolean inline_always)
6940 MonoInst *ins, *rvar = NULL;
6941 MonoMethodHeader *cheader;
6942 MonoBasicBlock *ebblock, *sbblock;
6944 MonoMethod *prev_inlined_method;
6945 MonoInst **prev_locals, **prev_args;
6946 MonoType **prev_arg_types;
6947 guint prev_real_offset;
6948 GHashTable *prev_cbb_hash;
6949 MonoBasicBlock **prev_cil_offset_to_bb;
6950 MonoBasicBlock *prev_cbb;
6951 unsigned char* prev_cil_start;
6952 guint32 prev_cil_offset_to_bb_len;
6953 MonoMethod *prev_current_method;
6954 MonoGenericContext *prev_generic_context;
6955 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6957 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging what gets inlined. */
6959 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6960 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6963 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6964 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6969 fsig = mono_method_signature (cmethod);
6971 if (cfg->verbose_level > 2)
6972 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6974 if (!cmethod->inline_info) {
6975 cfg->stat_inlineable_methods++;
6976 cmethod->inline_info = 1;
6979 /* allocate local variables */
6980 cheader = mono_method_get_header (cmethod);
6982 if (cheader == NULL || mono_loader_get_last_error ()) {
6983 MonoLoaderError *error = mono_loader_get_last_error ();
6986 mono_metadata_free_mh (cheader);
6987 if (inline_always && error)
6988 mono_cfg_set_exception (cfg, error->exception_type);
6990 mono_loader_clear_error ();
6994 /*Must verify before creating locals as it can cause the JIT to assert.*/
6995 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6996 mono_metadata_free_mh (cheader);
7000 /* allocate space to store the return value */
7001 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7002 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the inlinee's locals. */
7005 prev_locals = cfg->locals;
7006 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7007 for (i = 0; i < cheader->num_locals; ++i)
7008 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7010 /* allocate start and end blocks */
7011 /* This is needed so if the inline is aborted, we can clean up */
7012 NEW_BBLOCK (cfg, sbblock);
7013 sbblock->real_offset = real_offset;
7015 NEW_BBLOCK (cfg, ebblock);
7016 ebblock->block_num = cfg->num_bblocks++;
7017 ebblock->real_offset = real_offset;
/* Save the remaining per-method state before recursing. */
7019 prev_args = cfg->args;
7020 prev_arg_types = cfg->arg_types;
7021 prev_inlined_method = cfg->inlined_method;
7022 cfg->inlined_method = cmethod;
7023 cfg->ret_var_set = FALSE;
7024 cfg->inline_depth ++;
7025 prev_real_offset = cfg->real_offset;
7026 prev_cbb_hash = cfg->cbb_hash;
7027 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7028 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7029 prev_cil_start = cfg->cil_start;
7030 prev_cbb = cfg->cbb;
7031 prev_current_method = cfg->current_method;
7032 prev_generic_context = cfg->generic_context;
7033 prev_ret_var_set = cfg->ret_var_set;
7034 prev_disable_inline = cfg->disable_inline;
/* An inlined callvirt on a non-static method still needs its 'this' null-checked. */
7036 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively translate the inlinee's IL into IR between sbblock and ebblock. */
7039 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
7041 ret_var_set = cfg->ret_var_set;
/* Restore all saved state, regardless of whether the inline succeeded. */
7043 cfg->inlined_method = prev_inlined_method;
7044 cfg->real_offset = prev_real_offset;
7045 cfg->cbb_hash = prev_cbb_hash;
7046 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7047 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7048 cfg->cil_start = prev_cil_start;
7049 cfg->locals = prev_locals;
7050 cfg->args = prev_args;
7051 cfg->arg_types = prev_arg_types;
7052 cfg->current_method = prev_current_method;
7053 cfg->generic_context = prev_generic_context;
7054 cfg->ret_var_set = prev_ret_var_set;
7055 cfg->disable_inline = prev_disable_inline;
7056 cfg->inline_depth --;
/* Accept the inline when it was cheap enough, forced, or marked AggressiveInlining. */
7058 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7059 if (cfg->verbose_level > 2)
7060 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7062 cfg->stat_inlined_methods++;
7064 /* always add some code to avoid block split failures */
7065 MONO_INST_NEW (cfg, ins, OP_NOP);
7066 MONO_ADD_INS (prev_cbb, ins);
7068 prev_cbb->next_bb = sbblock;
7069 link_bblock (cfg, prev_cbb, sbblock);
7072 * Get rid of the begin and end bblocks if possible to aid local
7075 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7077 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7078 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7080 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7081 MonoBasicBlock *prev = ebblock->in_bb [0];
7082 mono_merge_basic_blocks (cfg, prev, ebblock);
7084 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7085 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7086 cfg->cbb = prev_cbb;
7090 * It's possible that the rvar is set in some prev bblock, but not in others.
7096 for (i = 0; i < ebblock->in_count; ++i) {
7097 bb = ebblock->in_bb [i];
7099 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7102 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7112 * If the inlined method contains only a throw, then the ret var is not
7113 * set, so set it to a dummy value.
7116 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Load the return value onto the evaluation stack for the caller. */
7118 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7121 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any pending exception state and roll back. */
7124 if (cfg->verbose_level > 2)
7125 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7126 cfg->exception_type = MONO_EXCEPTION_NONE;
7127 mono_loader_clear_error ();
7129 /* This gets rid of the newly added bblocks */
7130 cfg->cbb = prev_cbb;
7132 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7137 * Some of these comments may well be out-of-date.
7138 * Design decisions: we do a single pass over the IL code (and we do bblock
7139 * splitting/merging in the few cases when it's required: a back jump to an IL
7140 * address that was not already seen as bblock starting point).
7141 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7142 * Complex operations are decomposed in simpler ones right away. We need to let the
7143 * arch-specific code peek and poke inside this process somehow (except when the
7144 * optimizations can take advantage of the full semantic info of coarse opcodes).
7145 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7146 * MonoInst->opcode initially is the IL opcode or some simplification of that
7147 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7148 * opcode with value bigger than OP_LAST.
7149 * At this point the IR can be handed over to an interpreter, a dumb code generator
7150 * or to the optimizing code generator that will translate it to SSA form.
7152 * Profiling directed optimizations.
7153 * We may compile by default with few or no optimizations and instrument the code
7154 * or the user may indicate what methods to optimize the most either in a config file
7155 * or through repeated runs where the compiler applies offline the optimizations to
7156 * each method and then decides if it was worth it.
7159 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7160 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7161 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7162 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7163 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7164 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7165 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7166 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7168 /* offset from br.s -> br like opcodes */
7169 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. either no
 * basic block starts at IP or the one that does is BB itself.
 */
7172 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7174 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7176 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream in [START, END): decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target, at the
 * instruction following each branch, and at all switch targets. Also marks the
 * basic block containing a 'throw' as out-of-line so it can be laid out cold.
 */
7180 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7182 unsigned char *ip = start;
7183 unsigned char *target;
7186 MonoBasicBlock *bblock;
7187 const MonoOpcode *opcode;
7190 cli_addr = ip - start;
7191 i = mono_opcode_value ((const guint8 **)&ip, end);
7194 opcode = &mono_opcodes [i];
/* Advance IP past the operand and create bblocks depending on operand kind. */
7195 switch (opcode->argument) {
7196 case MonoInlineNone:
7199 case MonoInlineString:
7200 case MonoInlineType:
7201 case MonoInlineField:
7202 case MonoInlineMethod:
7205 case MonoShortInlineR:
7212 case MonoShortInlineVar:
7213 case MonoShortInlineI:
7216 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
7217 target = start + cli_addr + 2 + (signed char)ip [1];
7218 GET_BBLOCK (cfg, bblock, target);
7221 GET_BBLOCK (cfg, bblock, ip);
7223 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement relative to the next instruction. */
7224 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7225 GET_BBLOCK (cfg, bblock, target);
7228 GET_BBLOCK (cfg, bblock, ip);
7230 case MonoInlineSwitch: {
7231 guint32 n = read32 (ip + 1);
/* The switch targets are relative to the end of the whole switch instruction. */
7234 cli_addr += 5 + 4 * n;
7235 target = start + cli_addr;
7236 GET_BBLOCK (cfg, bblock, target);
7238 for (j = 0; j < n; ++j) {
7239 target = start + cli_addr + (gint32)read32 (ip);
7240 GET_BBLOCK (cfg, bblock, target);
7250 g_assert_not_reached ();
7253 if (i == CEE_THROW) {
7254 unsigned char *bb_start = ip - 1;
7256 /* Find the start of the bblock containing the throw */
7258 while ((bb_start >= start) && !bblock) {
7259 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the main code path. */
7263 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod, allowing open constructed types.
 * For wrapper methods the target is read from the wrapper data and inflated
 * with CONTEXT; otherwise the token is looked up through the normal metadata
 * path.
 */
7273 static inline MonoMethod *
7274 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7278 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7279 method = mono_method_get_wrapper_data (m, token);
7282 method = mono_class_inflate_generic_method_checked (method, context, &error);
7283 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7286 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but rejects methods on open constructed
 * types when the compilation is not generic-shared (presumably returning NULL
 * in that case — TODO confirm, the rejected branch is not visible here).
 */
7292 static inline MonoMethod *
7293 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7295 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7297 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in METHOD to a MonoClass, inflating with CONTEXT. Wrapper
 * methods carry the class in their wrapper data; otherwise the typespec is
 * resolved through the metadata. The resolved class is initialized before
 * being returned.
 */
7303 static inline MonoClass*
7304 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7309 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7310 klass = mono_method_get_wrapper_data (method, token);
7312 klass = mono_class_inflate_generic_class (klass, context);
7314 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7315 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7318 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in METHOD to a MonoMethodSignature, inflating it with
 * CONTEXT. Wrapper methods carry the signature in their wrapper data;
 * otherwise it is parsed from the image's metadata.
 */
7322 static inline MonoMethodSignature*
7323 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7325 MonoMethodSignature *fsig;
7327 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7328 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7330 fsig = mono_metadata_parse_signature (method->klass->image, token);
7334 fsig = mono_inflate_generic_signature(fsig, context, &error);
7336 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException method used
 * to raise CoreCLR security exceptions from JITted code.
 */
7342 throw_exception (void)
7344 static MonoMethod *method = NULL;
7347 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7348 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), i.e. IR that throws
 * the given pre-constructed exception object at runtime.
 */
7355 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7357 MonoMethod *thrower = throw_exception ();
7360 EMIT_NEW_PCONST (cfg, args [0], ex);
7361 mono_emit_method_call (cfg, thrower, args, NULL);
7365 * Return the original method if a wrapper is specified. We can only access
7366 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Unwrap METHOD to the method its custom attributes live on: non-wrappers
 * pass through; native-to-managed wrappers are special-cased (see the comment
 * below); other wrappers resolve to the wrapped method.
 */
7369 get_original_method (MonoMethod *method)
7371 if (method->wrapper_type == MONO_WRAPPER_NONE)
7374 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7375 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7378 /* in other cases we need to find the original method */
7379 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR that
 * throws the security exception at runtime.
 */
7383 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7385 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7386 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7388 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit IR that
 * throws the security exception at runtime.
 */
7392 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7394 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7395 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7397 emit_throw_exception (cfg, ex);
7401 * Check that the IL instructions at ip are the array initialization
7402 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence 'dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray' following a newarr, and if it matches,
 * return a pointer to the static field data (or, for AOT, the RVA as a
 * GUINT_TO_POINTER value) along with its size and field token, so the array
 * initialization can be compiled to a direct memory copy.
 */
7405 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7408 * newarr[System.Int32]
7410 * ldtoken field valuetype ...
7411 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the token table byte of the ldtoken operand. */
7413 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7415 guint32 token = read32 (ip + 7);
7416 guint32 field_token = read32 (ip + 2);
7417 guint32 field_index = field_token & 0xffffff;
7419 const char *data_ptr;
7421 MonoMethod *cmethod;
7422 MonoClass *dummy_class;
7423 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7427 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7431 *out_field_token = field_token;
7433 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only accept the real corlib RuntimeHelpers.InitializeArray. */
7436 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7438 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7439 case MONO_TYPE_BOOLEAN:
7443 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7444 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7445 case MONO_TYPE_CHAR:
/* Refuse if the computed data size exceeds the field's actual size. */
7462 if (size > mono_type_size (field->type, &dummy_align))
7465 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7466 if (!image_is_dynamic (method->klass->image)) {
7467 field_index = read32 (ip + 2) & 0xffffff;
7468 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7469 data_ptr = mono_image_rva_map (method->klass->image, rva);
7470 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7471 /* for aot code we do the lookup on load */
7472 if (aot && data_ptr)
7473 return GUINT_TO_POINTER (rva);
7475 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7477 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message that names METHOD
 * and disassembles the offending IL instruction at IP. The method header is
 * queued on headers_to_free rather than freed immediately.
 */
7485 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7487 char *method_fname = mono_method_full_name (method, TRUE);
7489 MonoMethodHeader *header = mono_method_get_header (method);
7491 if (header->code_size == 0)
7492 method_code = g_strdup ("method body is empty.");
7494 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7495 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7496 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7497 g_free (method_fname);
7498 g_free (method_code);
7499 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-constructed EXCEPTION object on CFG, registering the holder
 * as a GC root so the managed object is kept alive.
 */
7503 set_exception_object (MonoCompile *cfg, MonoException *exception)
7505 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7506 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7507 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the top-of-stack value *SP into local N. When the value
 * is a freshly emitted constant and the store would be a plain reg-reg move,
 * just retarget the constant's dreg to the local instead of emitting a store.
 */
7511 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7514 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7515 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7516 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7517 /* Optimize reg-reg moves away */
7519 * Can't optimize other opcodes, since sp[0] might point to
7520 * the last ins of a decomposed opcode.
7522 sp [0]->dreg = (cfg)->locals [n]->dreg;
7524 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7529 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so try to get rid of it in common
 * cases: if the ldloca at IP is immediately followed by an initobj in the
 * same bblock, initialize the local directly and (presumably) return the IP
 * past the consumed sequence so the caller skips it — TODO confirm, the
 * return is not visible here. SIZE selects the short/long ldloca encoding.
 */
7532 static inline unsigned char *
7533 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7543 local = read16 (ip + 2);
7547 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7548 /* From the INITOBJ case */
7549 token = read32 (ip + 2);
7550 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7551 CHECK_TYPELOAD (klass);
7552 type = mini_get_underlying_type (&klass->byval_arg);
7553 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_runtime_constant:
 *
 *   Load a runtime constant identified by PATCH_TYPE into *INS: an AOT
 * constant (patched at load time) when compiling AOT, otherwise the value is
 * resolved immediately and embedded as a pointer constant.
 */
7561 emit_runtime_constant (MonoCompile *cfg, MonoInst **ins, MonoJumpInfoType patch_type)
7563 if (cfg->compile_aot) {
7564 EMIT_NEW_AOTCONST (cfg, *ins, patch_type, NULL);
7569 ji.type = patch_type;
7570 target = mono_resolve_patch_target (NULL, NULL, NULL, &ji, FALSE);
7572 EMIT_NEW_PCONST (cfg, *ins, target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS derives from System.Exception, by walking the parent
 * chain up to mono_defaults.exception_class.
 */
7577 is_exception_class (MonoClass *klass)
7580 if (klass == mono_defaults.exception_class)
7582 klass = klass->parent;
7588 * is_jit_optimizer_disabled:
7590 * Determine whenever M's assembly has a DebuggableAttribute with the
7591 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is cached per-assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value store before the inited flag.
 */
7594 is_jit_optimizer_disabled (MonoMethod *m)
7596 MonoAssembly *ass = m->klass->image->assembly;
7597 MonoCustomAttrInfo* attrs;
7598 static MonoClass *klass;
7600 gboolean val = FALSE;
7603 if (ass->jit_optimizer_disabled_inited)
7604 return ass->jit_optimizer_disabled;
7607 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Attribute class not found: cache FALSE and publish via barrier + inited flag. */
7610 ass->jit_optimizer_disabled = FALSE;
7611 mono_memory_barrier ();
7612 ass->jit_optimizer_disabled_inited = TRUE;
7616 attrs = mono_custom_attrs_from_assembly (ass);
7618 for (i = 0; i < attrs->num_attrs; ++i) {
7619 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7621 MonoMethodSignature *sig;
7623 if (!attr->ctor || attr->ctor->klass != klass)
7625 /* Decode the attribute. See reflection.c */
7626 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog. */
7627 g_assert (read16 (p) == 0x0001);
7630 // FIXME: Support named parameters
7631 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is understood. */
7632 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7634 /* Two boolean arguments */
7638 mono_custom_attrs_free (attrs);
7641 ass->jit_optimizer_disabled = val;
7642 mono_memory_barrier ();
7643 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, invoked
 * via CALL_OPCODE) can be compiled as a tail call. Starts from the
 * architecture's verdict, then vetoes the tail call whenever the callee could
 * observe the current method's stack frame or other constraints apply.
 */
7649 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7651 gboolean supported_tail_call;
7654 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7656 for (i = 0; i < fsig->param_count; ++i) {
7657 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7658 /* These can point to the current method's stack */
7659 supported_tail_call = FALSE;
7661 if (fsig->hasthis && cmethod->klass->valuetype)
7662 /* this might point to the current method's stack */
7663 supported_tail_call = FALSE;
7664 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7665 supported_tail_call = FALSE;
/* An LMF frame must stay live for the duration of the call. */
7666 if (cfg->method->save_lmf)
7667 supported_tail_call = FALSE;
7668 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7669 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible (not callvirt/calli). */
7670 if (call_opcode != CEE_CALL)
7671 supported_tail_call = FALSE;
7673 /* Debugging support */
7675 if (supported_tail_call) {
7676 if (!mono_debug_count ())
7677 supported_tail_call = FALSE;
7681 return supported_tail_call;
7687 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes: pick the cheapest valid
 * strategy — a ctor intrinsic, inlining the ctor body, a gsharedvt indirect
 * call, an rgctx-resolved indirect call for context-dependent generic code,
 * or a plain direct call. For generic-shared valuetype ctors a vtable/rgctx
 * argument is materialized first.
 */
7690 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7691 MonoInst **sp, guint8 *ip, int *inline_costs)
7693 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7695 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7696 mono_method_is_generic_sharable (cmethod, TRUE)) {
7697 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7698 mono_class_vtable (cfg->domain, cmethod->klass);
7699 CHECK_TYPELOAD (cmethod->klass);
/* Method-level generic context: pass the method's RGCTX. */
7701 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7702 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Class-level context: resolve the vtable through the RGCTX. */
7705 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7706 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7708 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7710 CHECK_TYPELOAD (cmethod->klass);
7711 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7716 /* Avoid virtual calls to ctors if possible */
7717 if (mono_class_is_marshalbyref (cmethod->klass))
7718 callvirt_this_arg = sp [0];
7720 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
/* Ctor intrinsics must be void; the newly allocated object is already on sp. */
7721 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7722 CHECK_CFG_EXCEPTION;
7723 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7724 mono_method_check_inlining (cfg, cmethod) &&
7725 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7728 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7729 cfg->real_offset += 5;
/* The call instruction this replaces had already been costed at 5. */
7731 *inline_costs += costs - 5;
7733 INLINE_FAILURE ("inline failure");
7734 // FIXME-VT: Clean this up
7735 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7736 GSHAREDVT_FAILURE(*ip);
7737 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7739 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* gsharedvt signature: go through the gsharedvt out trampoline. */
7742 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7743 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7744 } else if (context_used &&
7745 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7746 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7747 MonoInst *cmethod_addr;
7749 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7751 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7752 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7754 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: direct call to the ctor. */
7756 INLINE_FAILURE ("ctor call");
7757 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7758 callvirt_this_arg, NULL, vtable_arg);
/*
 * Emit IR that stores VAL as the return value of the method being compiled.
 * Handles three cases: value types returned via CEE_STOBJ semantics (stored
 * either into the return variable or through the hidden vret address),
 * soft-float R4 returns (converted through a JIT icall first), and plain
 * scalar returns handed to the architecture backend.
 */
7765 emit_setret (MonoCompile *cfg, MonoInst *val)
7767 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Value-type return: needs a store rather than a register move. */
7770 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden return-address argument: store into the compiler's ret variable. */
7773 if (!cfg->vret_addr) {
7774 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Otherwise store the struct through the caller-provided return address. */
7776 EMIT_NEW_RETLOADA (cfg, ret_addr);
7778 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7779 ins->klass = mono_class_from_mono_type (ret_type);
7782 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: an R4 return value must be converted via an icall before setret. */
7783 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7784 MonoInst *iargs [1];
7788 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7789 mono_arch_emit_setret (cfg, cfg->method, conv);
7791 mono_arch_emit_setret (cfg, cfg->method, val);
/* Default scalar path: let the architecture backend place the value. */
7794 mono_arch_emit_setret (cfg, cfg->method, val);
/*
 * Return a copy of SIG extended with one extra trailing parameter of native
 * int type (pointer-sized), used to pass the rgctx as an explicit argument.
 * NOTE(review): the copy is g_malloc'ed (see FIXME below) and ownership /
 * lifetime of the returned signature is not freed here — confirm the caller's
 * expectations before changing the allocator.
 */
7799 static MonoMethodSignature*
7800 sig_to_rgctx_sig (MonoMethodSignature *sig)
7802 // FIXME: memory allocation
7803 MonoMethodSignature *res;
/* Allocate room for the original params plus the one appended rgctx param. */
7806 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7807 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7808 res->param_count = sig->param_count + 1;
/* Copy the original parameter types, then append the native-int rgctx slot. */
7809 for (i = 0; i < sig->param_count; ++i)
7810 res->params [i] = sig->params [i];
7811 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7816 * mono_method_to_ir:
7818 * Translate the .net IL into linear IR.
7821 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7822 MonoInst *return_var, MonoInst **inline_args,
7823 guint inline_offset, gboolean is_virtual_call)
7826 MonoInst *ins, **sp, **stack_start;
7827 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7828 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7829 MonoMethod *cmethod, *method_definition;
7830 MonoInst **arg_array;
7831 MonoMethodHeader *header;
7833 guint32 token, ins_flag;
7835 MonoClass *constrained_class = NULL;
7836 unsigned char *ip, *end, *target, *err_pos;
7837 MonoMethodSignature *sig;
7838 MonoGenericContext *generic_context = NULL;
7839 MonoGenericContainer *generic_container = NULL;
7840 MonoType **param_types;
7841 int i, n, start_new_bblock, dreg;
7842 int num_calls = 0, inline_costs = 0;
7843 int breakpoint_id = 0;
7845 GSList *class_inits = NULL;
7846 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7848 gboolean init_locals, seq_points, skip_dead_blocks;
7849 gboolean sym_seq_points = FALSE;
7850 MonoDebugMethodInfo *minfo;
7851 MonoBitSet *seq_point_locs = NULL;
7852 MonoBitSet *seq_point_set_locs = NULL;
7854 cfg->disable_inline = is_jit_optimizer_disabled (method);
7856 /* serialization and xdomain stuff may need access to private fields and methods */
7857 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7858 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7859 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7860 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7861 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7862 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7864 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7865 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7866 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7867 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7868 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7870 image = method->klass->image;
7871 header = mono_method_get_header (method);
7873 MonoLoaderError *error;
7875 if ((error = mono_loader_get_last_error ())) {
7876 mono_cfg_set_exception (cfg, error->exception_type);
7878 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7879 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7881 goto exception_exit;
7883 generic_container = mono_method_get_generic_container (method);
7884 sig = mono_method_signature (method);
7885 num_args = sig->hasthis + sig->param_count;
7886 ip = (unsigned char*)header->code;
7887 cfg->cil_start = ip;
7888 end = ip + header->code_size;
7889 cfg->stat_cil_code_size += header->code_size;
7891 seq_points = cfg->gen_seq_points && cfg->method == method;
7893 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7894 /* We could hit a seq point before attaching to the JIT (#8338) */
7898 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7899 minfo = mono_debug_lookup_method (method);
7901 MonoSymSeqPoint *sps;
7902 int i, n_il_offsets;
7904 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7905 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7906 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7907 sym_seq_points = TRUE;
7908 for (i = 0; i < n_il_offsets; ++i) {
7909 if (sps [i].il_offset < header->code_size)
7910 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7913 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7914 /* Methods without line number info like auto-generated property accessors */
7915 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7916 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7917 sym_seq_points = TRUE;
7922 * Methods without init_locals set could cause asserts in various passes
7923 * (#497220). To work around this, we emit dummy initialization opcodes
7924 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7925 * on some platforms.
7927 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7928 init_locals = header->init_locals;
7932 method_definition = method;
7933 while (method_definition->is_inflated) {
7934 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7935 method_definition = imethod->declaring;
7938 /* SkipVerification is not allowed if core-clr is enabled */
7939 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7941 dont_verify_stloc = TRUE;
7944 if (sig->is_inflated)
7945 generic_context = mono_method_get_context (method);
7946 else if (generic_container)
7947 generic_context = &generic_container->context;
7948 cfg->generic_context = generic_context;
7951 g_assert (!sig->has_type_parameters);
7953 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7954 g_assert (method->is_inflated);
7955 g_assert (mono_method_get_context (method)->method_inst);
7957 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7958 g_assert (sig->generic_param_count);
7960 if (cfg->method == method) {
7961 cfg->real_offset = 0;
7963 cfg->real_offset = inline_offset;
7966 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7967 cfg->cil_offset_to_bb_len = header->code_size;
7969 cfg->current_method = method;
7971 if (cfg->verbose_level > 2)
7972 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7974 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7976 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7977 for (n = 0; n < sig->param_count; ++n)
7978 param_types [n + sig->hasthis] = sig->params [n];
7979 cfg->arg_types = param_types;
7981 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7982 if (cfg->method == method) {
7984 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7985 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7988 NEW_BBLOCK (cfg, start_bblock);
7989 cfg->bb_entry = start_bblock;
7990 start_bblock->cil_code = NULL;
7991 start_bblock->cil_length = 0;
7994 NEW_BBLOCK (cfg, end_bblock);
7995 cfg->bb_exit = end_bblock;
7996 end_bblock->cil_code = NULL;
7997 end_bblock->cil_length = 0;
7998 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7999 g_assert (cfg->num_bblocks == 2);
8001 arg_array = cfg->args;
8003 if (header->num_clauses) {
8004 cfg->spvars = g_hash_table_new (NULL, NULL);
8005 cfg->exvars = g_hash_table_new (NULL, NULL);
8007 /* handle exception clauses */
8008 for (i = 0; i < header->num_clauses; ++i) {
8009 MonoBasicBlock *try_bb;
8010 MonoExceptionClause *clause = &header->clauses [i];
8011 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8013 try_bb->real_offset = clause->try_offset;
8014 try_bb->try_start = TRUE;
8015 try_bb->region = ((i + 1) << 8) | clause->flags;
8016 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8017 tblock->real_offset = clause->handler_offset;
8018 tblock->flags |= BB_EXCEPTION_HANDLER;
8021 * Linking the try block with the EH block hinders inlining as we won't be able to
8022 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8024 if (COMPILE_LLVM (cfg))
8025 link_bblock (cfg, try_bb, tblock);
8027 if (*(ip + clause->handler_offset) == CEE_POP)
8028 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8030 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8031 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8032 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8033 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8034 MONO_ADD_INS (tblock, ins);
8036 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8037 /* finally clauses already have a seq point */
8038 /* seq points for filter clauses are emitted below */
8039 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8040 MONO_ADD_INS (tblock, ins);
8043 /* todo: is a fault block unsafe to optimize? */
8044 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8045 tblock->flags |= BB_EXCEPTION_UNSAFE;
8048 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8050 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8052 /* catch and filter blocks get the exception object on the stack */
8053 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8054 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8056 /* mostly like handle_stack_args (), but just sets the input args */
8057 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8058 tblock->in_scount = 1;
8059 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8060 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8064 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8065 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8066 if (!cfg->compile_llvm) {
8067 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8068 ins->dreg = tblock->in_stack [0]->dreg;
8069 MONO_ADD_INS (tblock, ins);
8072 MonoInst *dummy_use;
8075 * Add a dummy use for the exvar so its liveness info will be
8078 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8081 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8082 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8083 MONO_ADD_INS (tblock, ins);
8086 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8087 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8088 tblock->flags |= BB_EXCEPTION_HANDLER;
8089 tblock->real_offset = clause->data.filter_offset;
8090 tblock->in_scount = 1;
8091 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8092 /* The filter block shares the exvar with the handler block */
8093 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8094 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8095 MONO_ADD_INS (tblock, ins);
8099 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8100 clause->data.catch_class &&
8102 mono_class_check_context_used (clause->data.catch_class)) {
8104 * In shared generic code with catch
8105 * clauses containing type variables
8106 * the exception handling code has to
8107 * be able to get to the rgctx.
8108 * Therefore we have to make sure that
8109 * the vtable/mrgctx argument (for
8110 * static or generic methods) or the
8111 * "this" argument (for non-static
8112 * methods) are live.
8114 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8115 mini_method_get_context (method)->method_inst ||
8116 method->klass->valuetype) {
8117 mono_get_vtable_var (cfg);
8119 MonoInst *dummy_use;
8121 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8126 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8127 cfg->cbb = start_bblock;
8128 cfg->args = arg_array;
8129 mono_save_args (cfg, sig, inline_args);
8132 /* FIRST CODE BLOCK */
8133 NEW_BBLOCK (cfg, tblock);
8134 tblock->cil_code = ip;
8138 ADD_BBLOCK (cfg, tblock);
8140 if (cfg->method == method) {
8141 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8142 if (breakpoint_id) {
8143 MONO_INST_NEW (cfg, ins, OP_BREAK);
8144 MONO_ADD_INS (cfg->cbb, ins);
8148 /* we use a separate basic block for the initialization code */
8149 NEW_BBLOCK (cfg, init_localsbb);
8150 cfg->bb_init = init_localsbb;
8151 init_localsbb->real_offset = cfg->real_offset;
8152 start_bblock->next_bb = init_localsbb;
8153 init_localsbb->next_bb = cfg->cbb;
8154 link_bblock (cfg, start_bblock, init_localsbb);
8155 link_bblock (cfg, init_localsbb, cfg->cbb);
8157 cfg->cbb = init_localsbb;
8159 if (cfg->gsharedvt && cfg->method == method) {
8160 MonoGSharedVtMethodInfo *info;
8161 MonoInst *var, *locals_var;
8164 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8165 info->method = cfg->method;
8166 info->count_entries = 16;
8167 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8168 cfg->gsharedvt_info = info;
8170 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8171 /* prevent it from being register allocated */
8172 //var->flags |= MONO_INST_VOLATILE;
8173 cfg->gsharedvt_info_var = var;
8175 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8176 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8178 /* Allocate locals */
8179 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8180 /* prevent it from being register allocated */
8181 //locals_var->flags |= MONO_INST_VOLATILE;
8182 cfg->gsharedvt_locals_var = locals_var;
8184 dreg = alloc_ireg (cfg);
8185 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8187 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8188 ins->dreg = locals_var->dreg;
8190 MONO_ADD_INS (cfg->cbb, ins);
8191 cfg->gsharedvt_locals_var_ins = ins;
8193 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8196 ins->flags |= MONO_INST_INIT;
8200 if (mono_security_core_clr_enabled ()) {
8201 /* check if this is native code, e.g. an icall or a p/invoke */
8202 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8203 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8205 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8206 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8208 /* if this ia a native call then it can only be JITted from platform code */
8209 if ((icall || pinvk) && method->klass && method->klass->image) {
8210 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8211 MonoException *ex = icall ? mono_get_exception_security () :
8212 mono_get_exception_method_access ();
8213 emit_throw_exception (cfg, ex);
8220 CHECK_CFG_EXCEPTION;
8222 if (header->code_size == 0)
8225 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8230 if (cfg->method == method)
8231 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8233 for (n = 0; n < header->num_locals; ++n) {
8234 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8239 /* We force the vtable variable here for all shared methods
8240 for the possibility that they might show up in a stack
8241 trace where their exact instantiation is needed. */
8242 if (cfg->gshared && method == cfg->method) {
8243 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8244 mini_method_get_context (method)->method_inst ||
8245 method->klass->valuetype) {
8246 mono_get_vtable_var (cfg);
8248 /* FIXME: Is there a better way to do this?
8249 We need the variable live for the duration
8250 of the whole method. */
8251 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8255 /* add a check for this != NULL to inlined methods */
8256 if (is_virtual_call) {
8259 NEW_ARGLOAD (cfg, arg_ins, 0);
8260 MONO_ADD_INS (cfg->cbb, arg_ins);
8261 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8264 skip_dead_blocks = !dont_verify;
8265 if (skip_dead_blocks) {
8266 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8271 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8272 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8275 start_new_bblock = 0;
8277 if (cfg->method == method)
8278 cfg->real_offset = ip - header->code;
8280 cfg->real_offset = inline_offset;
8285 if (start_new_bblock) {
8286 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8287 if (start_new_bblock == 2) {
8288 g_assert (ip == tblock->cil_code);
8290 GET_BBLOCK (cfg, tblock, ip);
8292 cfg->cbb->next_bb = tblock;
8294 start_new_bblock = 0;
8295 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8296 if (cfg->verbose_level > 3)
8297 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8298 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8302 g_slist_free (class_inits);
8305 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8306 link_bblock (cfg, cfg->cbb, tblock);
8307 if (sp != stack_start) {
8308 handle_stack_args (cfg, stack_start, sp - stack_start);
8310 CHECK_UNVERIFIABLE (cfg);
8312 cfg->cbb->next_bb = tblock;
8314 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8315 if (cfg->verbose_level > 3)
8316 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8317 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8320 g_slist_free (class_inits);
8325 if (skip_dead_blocks) {
8326 int ip_offset = ip - header->code;
8328 if (ip_offset == bb->end)
8332 int op_size = mono_opcode_size (ip, end);
8333 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8335 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8337 if (ip_offset + op_size == bb->end) {
8338 MONO_INST_NEW (cfg, ins, OP_NOP);
8339 MONO_ADD_INS (cfg->cbb, ins);
8340 start_new_bblock = 1;
8348 * Sequence points are points where the debugger can place a breakpoint.
8349 * Currently, we generate these automatically at points where the IL
8352 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8354 * Make methods interruptable at the beginning, and at the targets of
8355 * backward branches.
8356 * Also, do this at the start of every bblock in methods with clauses too,
8357 * to be able to handle instructions with inprecise control flow like
8359 * Backward branches are handled at the end of method-to-ir ().
8361 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8362 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8364 /* Avoid sequence points on empty IL like .volatile */
8365 // FIXME: Enable this
8366 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8367 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8368 if ((sp != stack_start) && !sym_seq_point)
8369 ins->flags |= MONO_INST_NONEMPTY_STACK;
8370 MONO_ADD_INS (cfg->cbb, ins);
8373 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8376 cfg->cbb->real_offset = cfg->real_offset;
8378 if ((cfg->method == method) && cfg->coverage_info) {
8379 guint32 cil_offset = ip - header->code;
8380 cfg->coverage_info->data [cil_offset].cil_code = ip;
8382 /* TODO: Use an increment here */
8383 #if defined(TARGET_X86)
8384 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8385 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8387 MONO_ADD_INS (cfg->cbb, ins);
8389 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8390 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8394 if (cfg->verbose_level > 3)
8395 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8399 if (seq_points && !sym_seq_points && sp != stack_start) {
8401 * The C# compiler uses these nops to notify the JIT that it should
8402 * insert seq points.
8404 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8405 MONO_ADD_INS (cfg->cbb, ins);
8407 if (cfg->keep_cil_nops)
8408 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8410 MONO_INST_NEW (cfg, ins, OP_NOP);
8412 MONO_ADD_INS (cfg->cbb, ins);
8415 if (should_insert_brekpoint (cfg->method)) {
8416 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8418 MONO_INST_NEW (cfg, ins, OP_NOP);
8421 MONO_ADD_INS (cfg->cbb, ins);
8427 CHECK_STACK_OVF (1);
8428 n = (*ip)-CEE_LDARG_0;
8430 EMIT_NEW_ARGLOAD (cfg, ins, n);
8438 CHECK_STACK_OVF (1);
8439 n = (*ip)-CEE_LDLOC_0;
8441 EMIT_NEW_LOCLOAD (cfg, ins, n);
8450 n = (*ip)-CEE_STLOC_0;
8453 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8455 emit_stloc_ir (cfg, sp, header, n);
8462 CHECK_STACK_OVF (1);
8465 EMIT_NEW_ARGLOAD (cfg, ins, n);
8471 CHECK_STACK_OVF (1);
8474 NEW_ARGLOADA (cfg, ins, n);
8475 MONO_ADD_INS (cfg->cbb, ins);
8485 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8487 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8492 CHECK_STACK_OVF (1);
8495 EMIT_NEW_LOCLOAD (cfg, ins, n);
8499 case CEE_LDLOCA_S: {
8500 unsigned char *tmp_ip;
8502 CHECK_STACK_OVF (1);
8503 CHECK_LOCAL (ip [1]);
8505 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8511 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8520 CHECK_LOCAL (ip [1]);
8521 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8523 emit_stloc_ir (cfg, sp, header, ip [1]);
8528 CHECK_STACK_OVF (1);
8529 EMIT_NEW_PCONST (cfg, ins, NULL);
8530 ins->type = STACK_OBJ;
8535 CHECK_STACK_OVF (1);
8536 EMIT_NEW_ICONST (cfg, ins, -1);
8549 CHECK_STACK_OVF (1);
8550 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8556 CHECK_STACK_OVF (1);
8558 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8564 CHECK_STACK_OVF (1);
8565 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8571 CHECK_STACK_OVF (1);
8572 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8573 ins->type = STACK_I8;
8574 ins->dreg = alloc_dreg (cfg, STACK_I8);
8576 ins->inst_l = (gint64)read64 (ip);
8577 MONO_ADD_INS (cfg->cbb, ins);
8583 gboolean use_aotconst = FALSE;
8585 #ifdef TARGET_POWERPC
8586 /* FIXME: Clean this up */
8587 if (cfg->compile_aot)
8588 use_aotconst = TRUE;
8591 /* FIXME: we should really allocate this only late in the compilation process */
8592 f = mono_domain_alloc (cfg->domain, sizeof (float));
8594 CHECK_STACK_OVF (1);
8600 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8602 dreg = alloc_freg (cfg);
8603 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8604 ins->type = cfg->r4_stack_type;
8606 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8607 ins->type = cfg->r4_stack_type;
8608 ins->dreg = alloc_dreg (cfg, STACK_R8);
8610 MONO_ADD_INS (cfg->cbb, ins);
8620 gboolean use_aotconst = FALSE;
8622 #ifdef TARGET_POWERPC
8623 /* FIXME: Clean this up */
8624 if (cfg->compile_aot)
8625 use_aotconst = TRUE;
8628 /* FIXME: we should really allocate this only late in the compilation process */
8629 d = mono_domain_alloc (cfg->domain, sizeof (double));
8631 CHECK_STACK_OVF (1);
8637 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8639 dreg = alloc_freg (cfg);
8640 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8641 ins->type = STACK_R8;
8643 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8644 ins->type = STACK_R8;
8645 ins->dreg = alloc_dreg (cfg, STACK_R8);
8647 MONO_ADD_INS (cfg->cbb, ins);
8656 MonoInst *temp, *store;
8658 CHECK_STACK_OVF (1);
8662 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8663 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8665 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8668 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8681 if (sp [0]->type == STACK_R8)
8682 /* we need to pop the value from the x86 FP stack */
8683 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8688 MonoMethodSignature *fsig;
8691 INLINE_FAILURE ("jmp");
8692 GSHAREDVT_FAILURE (*ip);
8695 if (stack_start != sp)
8697 token = read32 (ip + 1);
8698 /* FIXME: check the signature matches */
8699 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8701 if (!cmethod || mono_loader_get_last_error ())
8704 if (cfg->gshared && mono_method_check_context_used (cmethod))
8705 GENERIC_SHARING_FAILURE (CEE_JMP);
8707 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8709 fsig = mono_method_signature (cmethod);
8710 n = fsig->param_count + fsig->hasthis;
8711 if (cfg->llvm_only) {
8714 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8715 for (i = 0; i < n; ++i)
8716 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8717 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8719 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8720 * have to emit a normal return since llvm expects it.
8723 emit_setret (cfg, ins);
8724 MONO_INST_NEW (cfg, ins, OP_BR);
8725 ins->inst_target_bb = end_bblock;
8726 MONO_ADD_INS (cfg->cbb, ins);
8727 link_bblock (cfg, cfg->cbb, end_bblock);
8730 } else if (cfg->backend->have_op_tail_call) {
8731 /* Handle tail calls similarly to calls */
8734 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8735 call->method = cmethod;
8736 call->tail_call = TRUE;
8737 call->signature = mono_method_signature (cmethod);
8738 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8739 call->inst.inst_p0 = cmethod;
8740 for (i = 0; i < n; ++i)
8741 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8743 mono_arch_emit_call (cfg, call);
8744 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8745 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8747 for (i = 0; i < num_args; ++i)
8748 /* Prevent arguments from being optimized away */
8749 arg_array [i]->flags |= MONO_INST_VOLATILE;
8751 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8752 ins = (MonoInst*)call;
8753 ins->inst_p0 = cmethod;
8754 MONO_ADD_INS (cfg->cbb, ins);
8758 start_new_bblock = 1;
8763 MonoMethodSignature *fsig;
8766 token = read32 (ip + 1);
8770 //GSHAREDVT_FAILURE (*ip);
8775 fsig = mini_get_signature (method, token, generic_context);
8777 if (method->dynamic && fsig->pinvoke) {
8781 * This is a call through a function pointer using a pinvoke
8782 * signature. Have to create a wrapper and call that instead.
8783 * FIXME: This is very slow, need to create a wrapper at JIT time
8784 * instead based on the signature.
8786 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8787 EMIT_NEW_PCONST (cfg, args [1], fsig);
8789 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8792 n = fsig->param_count + fsig->hasthis;
8796 //g_assert (!virtual || fsig->hasthis);
8800 inline_costs += 10 * num_calls++;
8803 * Making generic calls out of gsharedvt methods.
8804 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8805 * patching gshared method addresses into a gsharedvt method.
8807 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8809 * We pass the address to the gsharedvt trampoline in the rgctx reg
8811 MonoInst *callee = addr;
8813 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8815 GSHAREDVT_FAILURE (*ip);
8817 addr = emit_get_rgctx_sig (cfg, context_used,
8818 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8819 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8823 /* Prevent inlining of methods with indirect calls */
8824 INLINE_FAILURE ("indirect call");
8826 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8831 * Instead of emitting an indirect call, emit a direct call
8832 * with the contents of the aotconst as the patch info.
8834 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8835 info_type = addr->inst_c1;
8836 info_data = addr->inst_p0;
8838 info_type = addr->inst_right->inst_c1;
8839 info_data = addr->inst_right->inst_left;
8842 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8843 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8848 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8852 /* End of call, INS should contain the result of the call, if any */
8854 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8856 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8859 CHECK_CFG_EXCEPTION;
8863 constrained_class = NULL;
8867 case CEE_CALLVIRT: {
8868 MonoInst *addr = NULL;
8869 MonoMethodSignature *fsig = NULL;
8871 int virtual = *ip == CEE_CALLVIRT;
8872 gboolean pass_imt_from_rgctx = FALSE;
8873 MonoInst *imt_arg = NULL;
8874 MonoInst *keep_this_alive = NULL;
8875 gboolean pass_vtable = FALSE;
8876 gboolean pass_mrgctx = FALSE;
8877 MonoInst *vtable_arg = NULL;
8878 gboolean check_this = FALSE;
8879 gboolean supported_tail_call = FALSE;
8880 gboolean tail_call = FALSE;
8881 gboolean need_seq_point = FALSE;
8882 guint32 call_opcode = *ip;
8883 gboolean emit_widen = TRUE;
8884 gboolean push_res = TRUE;
8885 gboolean skip_ret = FALSE;
8886 gboolean delegate_invoke = FALSE;
8887 gboolean direct_icall = FALSE;
8888 gboolean constrained_partial_call = FALSE;
8889 MonoMethod *cil_method;
8892 token = read32 (ip + 1);
8896 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8897 cil_method = cmethod;
8899 if (constrained_class) {
8900 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8901 if (!mini_is_gsharedvt_klass (constrained_class)) {
8902 g_assert (!cmethod->klass->valuetype);
8903 if (!mini_type_is_reference (&constrained_class->byval_arg))
8904 constrained_partial_call = TRUE;
8908 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8909 if (cfg->verbose_level > 2)
8910 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8911 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8912 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8914 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8918 if (cfg->verbose_level > 2)
8919 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8921 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8923 * This is needed since get_method_constrained can't find
8924 * the method in klass representing a type var.
8925 * The type var is guaranteed to be a reference type in this
8928 if (!mini_is_gsharedvt_klass (constrained_class))
8929 g_assert (!cmethod->klass->valuetype);
8931 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8937 if (!cmethod || mono_loader_get_last_error ())
8939 if (!dont_verify && !cfg->skip_visibility) {
8940 MonoMethod *target_method = cil_method;
8941 if (method->is_inflated) {
8942 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8944 if (!mono_method_can_access_method (method_definition, target_method) &&
8945 !mono_method_can_access_method (method, cil_method))
8946 METHOD_ACCESS_FAILURE (method, cil_method);
8949 if (mono_security_core_clr_enabled ())
8950 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8952 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8953 /* MS.NET seems to silently convert this to a callvirt */
8958 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8959 * converts to a callvirt.
8961 * tests/bug-515884.il is an example of this behavior
8963 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8964 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8965 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8969 if (!cmethod->klass->inited)
8970 if (!mono_class_init (cmethod->klass))
8971 TYPE_LOAD_ERROR (cmethod->klass);
8973 fsig = mono_method_signature (cmethod);
8976 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8977 mini_class_is_system_array (cmethod->klass)) {
8978 array_rank = cmethod->klass->rank;
8979 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8980 direct_icall = TRUE;
8981 } else if (fsig->pinvoke) {
8982 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8983 fsig = mono_method_signature (wrapper);
8984 } else if (constrained_class) {
8986 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8990 /* See code below */
8991 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8992 MonoBasicBlock *tbb;
8994 GET_BBLOCK (cfg, tbb, ip + 5);
8995 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8997 * We want to extend the try block to cover the call, but we can't do it if the
8998 * call is made directly since it's followed by an exception check.
9000 direct_icall = FALSE;
9004 mono_save_token_info (cfg, image, token, cil_method);
9006 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9007 need_seq_point = TRUE;
9009 /* Don't support calls made using type arguments for now */
9011 if (cfg->gsharedvt) {
9012 if (mini_is_gsharedvt_signature (fsig))
9013 GSHAREDVT_FAILURE (*ip);
9017 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9018 g_assert_not_reached ();
9020 n = fsig->param_count + fsig->hasthis;
9022 if (!cfg->gshared && cmethod->klass->generic_container)
9026 g_assert (!mono_method_check_context_used (cmethod));
9030 //g_assert (!virtual || fsig->hasthis);
9034 if (constrained_class) {
9035 if (mini_is_gsharedvt_klass (constrained_class)) {
9036 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9037 /* The 'Own method' case below */
9038 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9039 /* 'The type parameter is instantiated as a reference type' case below. */
9041 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9042 CHECK_CFG_EXCEPTION;
9049 * We have the `constrained.' prefix opcode.
9051 if (constrained_partial_call) {
9052 gboolean need_box = TRUE;
9055 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9056 * called method is not known at compile time either. The called method could end up being
9057 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9058 * to box the receiver.
9059 * A simple solution would be to box always and make a normal virtual call, but that would
9060 * be bad performance wise.
9062 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9064 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9069 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9070 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9071 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9072 ins->klass = constrained_class;
9073 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9074 CHECK_CFG_EXCEPTION;
9075 } else if (need_box) {
9077 MonoBasicBlock *is_ref_bb, *end_bb;
9078 MonoInst *nonbox_call;
9081 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9083 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9084 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9086 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9088 NEW_BBLOCK (cfg, is_ref_bb);
9089 NEW_BBLOCK (cfg, end_bb);
9091 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9092 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
9093 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9096 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9101 MONO_START_BB (cfg, is_ref_bb);
9102 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9103 ins->klass = constrained_class;
9104 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9105 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9109 MONO_START_BB (cfg, end_bb);
9112 nonbox_call->dreg = ins->dreg;
9115 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9116 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9117 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9120 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9122 * The type parameter is instantiated as a valuetype,
9123 * but that type doesn't override the method we're
9124 * calling, so we need to box `this'.
9126 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9127 ins->klass = constrained_class;
9128 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9129 CHECK_CFG_EXCEPTION;
9130 } else if (!constrained_class->valuetype) {
9131 int dreg = alloc_ireg_ref (cfg);
9134 * The type parameter is instantiated as a reference
9135 * type. We have a managed pointer on the stack, so
9136 * we need to dereference it here.
9138 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9139 ins->type = STACK_OBJ;
9142 if (cmethod->klass->valuetype) {
9145 /* Interface method */
9148 mono_class_setup_vtable (constrained_class);
9149 CHECK_TYPELOAD (constrained_class);
9150 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9152 TYPE_LOAD_ERROR (constrained_class);
9153 slot = mono_method_get_vtable_slot (cmethod);
9155 TYPE_LOAD_ERROR (cmethod->klass);
9156 cmethod = constrained_class->vtable [ioffset + slot];
9158 if (cmethod->klass == mono_defaults.enum_class) {
9159 /* Enum implements some interfaces, so treat this as the first case */
9160 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9161 ins->klass = constrained_class;
9162 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9163 CHECK_CFG_EXCEPTION;
9168 constrained_class = NULL;
9171 if (check_call_signature (cfg, fsig, sp))
9174 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9175 delegate_invoke = TRUE;
9177 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9178 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9179 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9187 * If the callee is a shared method, then its static cctor
9188 * might not get called after the call was patched.
9190 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9191 emit_class_init (cfg, cmethod->klass);
9192 CHECK_TYPELOAD (cmethod->klass);
9195 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9198 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9200 context_used = mini_method_check_context_used (cfg, cmethod);
9202 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9203 /* Generic method interface
9204 calls are resolved via a
9205 helper function and don't
9207 if (!cmethod_context || !cmethod_context->method_inst)
9208 pass_imt_from_rgctx = TRUE;
9212 * If a shared method calls another
9213 * shared method then the caller must
9214 * have a generic sharing context
9215 * because the magic trampoline
9216 * requires it. FIXME: We shouldn't
9217 * have to force the vtable/mrgctx
9218 * variable here. Instead there
9219 * should be a flag in the cfg to
9220 * request a generic sharing context.
9223 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9224 mono_get_vtable_var (cfg);
9229 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9231 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9233 CHECK_TYPELOAD (cmethod->klass);
9234 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9239 g_assert (!vtable_arg);
9241 if (!cfg->compile_aot) {
9243 * emit_get_rgctx_method () calls mono_class_vtable () so check
9244 * for type load errors before.
9246 mono_class_setup_vtable (cmethod->klass);
9247 CHECK_TYPELOAD (cmethod->klass);
9250 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9252 /* !marshalbyref is needed to properly handle generic methods + remoting */
9253 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9254 MONO_METHOD_IS_FINAL (cmethod)) &&
9255 !mono_class_is_marshalbyref (cmethod->klass)) {
9262 if (pass_imt_from_rgctx) {
9263 g_assert (!pass_vtable);
9265 imt_arg = emit_get_rgctx_method (cfg, context_used,
9266 cmethod, MONO_RGCTX_INFO_METHOD);
9270 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9272 /* Calling virtual generic methods */
9273 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9274 !(MONO_METHOD_IS_FINAL (cmethod) &&
9275 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9276 fsig->generic_param_count &&
9277 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9279 MonoInst *this_temp, *this_arg_temp, *store;
9280 MonoInst *iargs [4];
9282 g_assert (fsig->is_inflated);
9284 /* Prevent inlining of methods that contain indirect calls */
9285 INLINE_FAILURE ("virtual generic call");
9287 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9288 GSHAREDVT_FAILURE (*ip);
9290 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9291 g_assert (!imt_arg);
9293 g_assert (cmethod->is_inflated);
9294 imt_arg = emit_get_rgctx_method (cfg, context_used,
9295 cmethod, MONO_RGCTX_INFO_METHOD);
9296 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9298 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9299 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9300 MONO_ADD_INS (cfg->cbb, store);
9302 /* FIXME: This should be a managed pointer */
9303 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9305 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9306 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9307 cmethod, MONO_RGCTX_INFO_METHOD);
9308 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9309 addr = mono_emit_jit_icall (cfg,
9310 mono_helper_compile_generic_method, iargs);
9312 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9314 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9321 * Implement a workaround for the inherent races involved in locking:
9327 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9328 * try block, the Exit () won't be executed, see:
9329 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9330 * To work around this, we extend such try blocks to include the last x bytes
9331 * of the Monitor.Enter () call.
9333 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9334 MonoBasicBlock *tbb;
9336 GET_BBLOCK (cfg, tbb, ip + 5);
9338 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9339 * from Monitor.Enter like ArgumentNullException.
9341 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9342 /* Mark this bblock as needing to be extended */
9343 tbb->extend_try_block = TRUE;
9347 /* Conversion to a JIT intrinsic */
9348 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9349 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9350 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9357 if ((cfg->opt & MONO_OPT_INLINE) &&
9358 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9359 mono_method_check_inlining (cfg, cmethod)) {
9361 gboolean always = FALSE;
9363 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9364 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9365 /* Prevent inlining of methods that call wrappers */
9366 INLINE_FAILURE ("wrapper call");
9367 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9371 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9373 cfg->real_offset += 5;
9375 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9376 /* *sp is already set by inline_method */
9381 inline_costs += costs;
9387 /* Tail recursion elimination */
9388 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9389 gboolean has_vtargs = FALSE;
9392 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9393 INLINE_FAILURE ("tail call");
9395 /* keep it simple */
9396 for (i = fsig->param_count - 1; i >= 0; i--) {
9397 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9402 for (i = 0; i < n; ++i)
9403 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9404 MONO_INST_NEW (cfg, ins, OP_BR);
9405 MONO_ADD_INS (cfg->cbb, ins);
9406 tblock = start_bblock->out_bb [0];
9407 link_bblock (cfg, cfg->cbb, tblock);
9408 ins->inst_target_bb = tblock;
9409 start_new_bblock = 1;
9411 /* skip the CEE_RET, too */
9412 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9419 inline_costs += 10 * num_calls++;
9422 * Making generic calls out of gsharedvt methods.
9423 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9424 * patching gshared method addresses into a gsharedvt method.
9426 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9427 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9428 MonoRgctxInfoType info_type;
9431 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9432 //GSHAREDVT_FAILURE (*ip);
9433 // disable for possible remoting calls
9434 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9435 GSHAREDVT_FAILURE (*ip);
9436 if (fsig->generic_param_count) {
9437 /* virtual generic call */
9438 g_assert (!imt_arg);
9439 /* Same as the virtual generic case above */
9440 imt_arg = emit_get_rgctx_method (cfg, context_used,
9441 cmethod, MONO_RGCTX_INFO_METHOD);
9442 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9444 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9445 /* This can happen when we call a fully instantiated iface method */
9446 imt_arg = emit_get_rgctx_method (cfg, context_used,
9447 cmethod, MONO_RGCTX_INFO_METHOD);
9452 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9453 keep_this_alive = sp [0];
9455 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9456 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9458 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9459 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9461 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9465 /* Generic sharing */
9468 * Use this if the callee is gsharedvt sharable too, since
9469 * at runtime we might find an instantiation so the call cannot
9470 * be patched (the 'no_patch' code path in mini-trampolines.c).
9472 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9473 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9474 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9475 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9476 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9477 INLINE_FAILURE ("gshared");
9479 g_assert (cfg->gshared && cmethod);
9483 * We are compiling a call to a
9484 * generic method from shared code,
9485 * which means that we have to look up
9486 * the method in the rgctx and do an
9490 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9492 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9493 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9497 /* Direct calls to icalls */
9499 MonoMethod *wrapper;
9502 /* Inline the wrapper */
9503 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9505 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9506 g_assert (costs > 0);
9507 cfg->real_offset += 5;
9509 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9510 /* *sp is already set by inline_method */
9515 inline_costs += costs;
9524 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9525 MonoInst *val = sp [fsig->param_count];
9527 if (val->type == STACK_OBJ) {
9528 MonoInst *iargs [2];
9533 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9536 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9537 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9538 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9539 emit_write_barrier (cfg, addr, val);
9540 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9541 GSHAREDVT_FAILURE (*ip);
9542 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9543 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9545 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9546 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9547 if (!cmethod->klass->element_class->valuetype && !readonly)
9548 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9549 CHECK_TYPELOAD (cmethod->klass);
9552 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9555 g_assert_not_reached ();
9562 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9566 /* Tail prefix / tail call optimization */
9568 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9569 /* FIXME: runtime generic context pointer for jumps? */
9570 /* FIXME: handle this for generic sharing eventually */
9571 if ((ins_flag & MONO_INST_TAILCALL) &&
9572 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9573 supported_tail_call = TRUE;
9575 if (supported_tail_call) {
9578 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9579 INLINE_FAILURE ("tail call");
9581 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9583 if (cfg->backend->have_op_tail_call) {
9584 /* Handle tail calls similarly to normal calls */
9587 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9589 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9590 call->tail_call = TRUE;
9591 call->method = cmethod;
9592 call->signature = mono_method_signature (cmethod);
9595 * We implement tail calls by storing the actual arguments into the
9596 * argument variables, then emitting a CEE_JMP.
9598 for (i = 0; i < n; ++i) {
9599 /* Prevent argument from being register allocated */
9600 arg_array [i]->flags |= MONO_INST_VOLATILE;
9601 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9603 ins = (MonoInst*)call;
9604 ins->inst_p0 = cmethod;
9605 ins->inst_p1 = arg_array [0];
9606 MONO_ADD_INS (cfg->cbb, ins);
9607 link_bblock (cfg, cfg->cbb, end_bblock);
9608 start_new_bblock = 1;
9610 // FIXME: Eliminate unreachable epilogs
9613 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9614 * only reachable from this call.
9616 GET_BBLOCK (cfg, tblock, ip + 5);
9617 if (tblock == cfg->cbb || tblock->in_count == 0)
9626 * Synchronized wrappers.
9627 * It's hard to determine where to replace a method with its synchronized
9628 * wrapper without causing an infinite recursion. The current solution is
9629 * to add the synchronized wrapper in the trampolines, and to
9630 * change the called method to a dummy wrapper, and resolve that wrapper
9631 * to the real method in mono_jit_compile_method ().
9633 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9634 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9635 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9636 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9640 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9641 * (i.e. its a vtype method), and there is no way for the caller to know this at compile time.
9642 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9643 * based on whether there is an rgctx or not.
9645 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9646 MonoInst *args_buf [16], *icall_args [16];
9648 MonoBasicBlock *rgctx_bb, *end_bb;
9649 MonoInst *call1, *call2, *call_target;
9650 MonoMethodSignature *rgctx_sig;
9651 int rgctx_reg, tmp_reg;
9653 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9655 NEW_BBLOCK (cfg, rgctx_bb);
9656 NEW_BBLOCK (cfg, end_bb);
9658 // FIXME: Optimize this
9660 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9662 icall_args [0] = sp [0];
9663 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9665 icall_args [2] = imt_arg;
9667 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9668 icall_args [2] = ins;
9671 rgctx_reg = alloc_preg (cfg);
9672 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9673 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9674 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9676 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9678 // FIXME: Only do this if needed (generic calls)
9680 // Check whether to pass an rgctx
9681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9683 /* Non rgctx case */
9684 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9687 MONO_START_BB (cfg, rgctx_bb);
9688 /* Make a call with an rgctx */
9689 if (fsig->param_count + 2 < 16)
9692 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9694 for (i = 0; i < fsig->param_count; ++i)
9695 args [i + 1] = sp [i + 1];
9696 tmp_reg = alloc_preg (cfg);
9697 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9698 rgctx_sig = sig_to_rgctx_sig (fsig);
9699 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9700 call2->dreg = call1->dreg;
9701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9703 MONO_START_BB (cfg, end_bb);
9709 INLINE_FAILURE ("call");
9710 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9711 imt_arg, vtable_arg);
9713 if (tail_call && !cfg->llvm_only) {
9714 link_bblock (cfg, cfg->cbb, end_bblock);
9715 start_new_bblock = 1;
9717 // FIXME: Eliminate unreachable epilogs
9720 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9721 * only reachable from this call.
9723 GET_BBLOCK (cfg, tblock, ip + 5);
9724 if (tblock == cfg->cbb || tblock->in_count == 0)
9731 /* End of call, INS should contain the result of the call, if any */
9733 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9736 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9741 if (keep_this_alive) {
9742 MonoInst *dummy_use;
9744 /* See mono_emit_method_call_full () */
9745 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9748 CHECK_CFG_EXCEPTION;
9752 g_assert (*ip == CEE_RET);
9756 constrained_class = NULL;
9758 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9762 if (cfg->method != method) {
9763 /* return from inlined method */
9765 * If in_count == 0, that means the ret is unreachable due to
9766 * being preceded by a throw. In that case, inline_method () will
9767 * handle setting the return value
9768 * (test case: test_0_inline_throw ()).
9770 if (return_var && cfg->cbb->in_count) {
9771 MonoType *ret_type = mono_method_signature (method)->ret;
9777 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9780 //g_assert (returnvar != -1);
9781 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9782 cfg->ret_var_set = TRUE;
9785 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9787 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9791 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9793 if (seq_points && !sym_seq_points) {
9795 * Place a seq point here too even though the IL stack is not
9796 * empty, so a step over on
9799 * will work correctly.
9801 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9802 MONO_ADD_INS (cfg->cbb, ins);
9805 g_assert (!return_var);
9809 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9812 emit_setret (cfg, *sp);
9815 if (sp != stack_start)
9817 MONO_INST_NEW (cfg, ins, OP_BR);
9819 ins->inst_target_bb = end_bblock;
9820 MONO_ADD_INS (cfg->cbb, ins);
9821 link_bblock (cfg, cfg->cbb, end_bblock);
9822 start_new_bblock = 1;
9826 MONO_INST_NEW (cfg, ins, OP_BR);
9828 target = ip + 1 + (signed char)(*ip);
9830 GET_BBLOCK (cfg, tblock, target);
9831 link_bblock (cfg, cfg->cbb, tblock);
9832 ins->inst_target_bb = tblock;
9833 if (sp != stack_start) {
9834 handle_stack_args (cfg, stack_start, sp - stack_start);
9836 CHECK_UNVERIFIABLE (cfg);
9838 MONO_ADD_INS (cfg->cbb, ins);
9839 start_new_bblock = 1;
9840 inline_costs += BRANCH_COST;
9854 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9856 target = ip + 1 + *(signed char*)ip;
9862 inline_costs += BRANCH_COST;
9866 MONO_INST_NEW (cfg, ins, OP_BR);
9869 target = ip + 4 + (gint32)read32(ip);
9871 GET_BBLOCK (cfg, tblock, target);
9872 link_bblock (cfg, cfg->cbb, tblock);
9873 ins->inst_target_bb = tblock;
9874 if (sp != stack_start) {
9875 handle_stack_args (cfg, stack_start, sp - stack_start);
9877 CHECK_UNVERIFIABLE (cfg);
9880 MONO_ADD_INS (cfg->cbb, ins);
9882 start_new_bblock = 1;
9883 inline_costs += BRANCH_COST;
9890 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9891 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9892 guint32 opsize = is_short ? 1 : 4;
9894 CHECK_OPSIZE (opsize);
9896 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9899 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9904 GET_BBLOCK (cfg, tblock, target);
9905 link_bblock (cfg, cfg->cbb, tblock);
9906 GET_BBLOCK (cfg, tblock, ip);
9907 link_bblock (cfg, cfg->cbb, tblock);
9909 if (sp != stack_start) {
9910 handle_stack_args (cfg, stack_start, sp - stack_start);
9911 CHECK_UNVERIFIABLE (cfg);
9914 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9915 cmp->sreg1 = sp [0]->dreg;
9916 type_from_op (cfg, cmp, sp [0], NULL);
9919 #if SIZEOF_REGISTER == 4
9920 if (cmp->opcode == OP_LCOMPARE_IMM) {
9921 /* Convert it to OP_LCOMPARE */
9922 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9923 ins->type = STACK_I8;
9924 ins->dreg = alloc_dreg (cfg, STACK_I8);
9926 MONO_ADD_INS (cfg->cbb, ins);
9927 cmp->opcode = OP_LCOMPARE;
9928 cmp->sreg2 = ins->dreg;
9931 MONO_ADD_INS (cfg->cbb, cmp);
9933 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9934 type_from_op (cfg, ins, sp [0], NULL);
9935 MONO_ADD_INS (cfg->cbb, ins);
9936 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9937 GET_BBLOCK (cfg, tblock, target);
9938 ins->inst_true_bb = tblock;
9939 GET_BBLOCK (cfg, tblock, ip);
9940 ins->inst_false_bb = tblock;
9941 start_new_bblock = 2;
9944 inline_costs += BRANCH_COST;
9959 MONO_INST_NEW (cfg, ins, *ip);
9961 target = ip + 4 + (gint32)read32(ip);
9967 inline_costs += BRANCH_COST;
9971 MonoBasicBlock **targets;
9972 MonoBasicBlock *default_bblock;
9973 MonoJumpInfoBBTable *table;
9974 int offset_reg = alloc_preg (cfg);
9975 int target_reg = alloc_preg (cfg);
9976 int table_reg = alloc_preg (cfg);
9977 int sum_reg = alloc_preg (cfg);
9978 gboolean use_op_switch;
9982 n = read32 (ip + 1);
9985 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9989 CHECK_OPSIZE (n * sizeof (guint32));
9990 target = ip + n * sizeof (guint32);
9992 GET_BBLOCK (cfg, default_bblock, target);
9993 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9995 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9996 for (i = 0; i < n; ++i) {
9997 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9998 targets [i] = tblock;
9999 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10003 if (sp != stack_start) {
10005 * Link the current bb with the targets as well, so handle_stack_args
10006 * will set their in_stack correctly.
10008 link_bblock (cfg, cfg->cbb, default_bblock);
10009 for (i = 0; i < n; ++i)
10010 link_bblock (cfg, cfg->cbb, targets [i]);
10012 handle_stack_args (cfg, stack_start, sp - stack_start);
10014 CHECK_UNVERIFIABLE (cfg);
10016 /* Undo the links */
10017 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10018 for (i = 0; i < n; ++i)
10019 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10022 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10023 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10025 for (i = 0; i < n; ++i)
10026 link_bblock (cfg, cfg->cbb, targets [i]);
10028 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10029 table->table = targets;
10030 table->table_size = n;
10032 use_op_switch = FALSE;
10034 /* ARM implements SWITCH statements differently */
10035 /* FIXME: Make it use the generic implementation */
10036 if (!cfg->compile_aot)
10037 use_op_switch = TRUE;
10040 if (COMPILE_LLVM (cfg))
10041 use_op_switch = TRUE;
10043 cfg->cbb->has_jump_table = 1;
10045 if (use_op_switch) {
10046 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10047 ins->sreg1 = src1->dreg;
10048 ins->inst_p0 = table;
10049 ins->inst_many_bb = targets;
10050 ins->klass = GUINT_TO_POINTER (n);
10051 MONO_ADD_INS (cfg->cbb, ins);
10053 if (sizeof (gpointer) == 8)
10054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10058 #if SIZEOF_REGISTER == 8
10059 /* The upper word might not be zero, and we add it to a 64 bit address later */
10060 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10063 if (cfg->compile_aot) {
10064 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10066 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10067 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10068 ins->inst_p0 = table;
10069 ins->dreg = table_reg;
10070 MONO_ADD_INS (cfg->cbb, ins);
10073 /* FIXME: Use load_memindex */
10074 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10076 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10078 start_new_bblock = 1;
10079 inline_costs += (BRANCH_COST * 2);
10092 case CEE_LDIND_REF:
10099 dreg = alloc_freg (cfg);
10102 dreg = alloc_lreg (cfg);
10104 case CEE_LDIND_REF:
10105 dreg = alloc_ireg_ref (cfg);
10108 dreg = alloc_preg (cfg);
10111 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10112 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10113 if (*ip == CEE_LDIND_R4)
10114 ins->type = cfg->r4_stack_type;
10115 ins->flags |= ins_flag;
10116 MONO_ADD_INS (cfg->cbb, ins);
10118 if (ins_flag & MONO_INST_VOLATILE) {
10119 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10120 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10125 case CEE_STIND_REF:
10136 if (ins_flag & MONO_INST_VOLATILE) {
10137 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10138 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10141 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10142 ins->flags |= ins_flag;
10145 MONO_ADD_INS (cfg->cbb, ins);
10147 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10148 emit_write_barrier (cfg, sp [0], sp [1]);
10157 MONO_INST_NEW (cfg, ins, (*ip));
10159 ins->sreg1 = sp [0]->dreg;
10160 ins->sreg2 = sp [1]->dreg;
10161 type_from_op (cfg, ins, sp [0], sp [1]);
10163 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10165 /* Use the immediate opcodes if possible */
10166 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10167 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10168 if (imm_opcode != -1) {
10169 ins->opcode = imm_opcode;
10170 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10173 NULLIFY_INS (sp [1]);
10177 MONO_ADD_INS ((cfg)->cbb, (ins));
10179 *sp++ = mono_decompose_opcode (cfg, ins);
10196 MONO_INST_NEW (cfg, ins, (*ip));
10198 ins->sreg1 = sp [0]->dreg;
10199 ins->sreg2 = sp [1]->dreg;
10200 type_from_op (cfg, ins, sp [0], sp [1]);
10202 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10203 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10205 /* FIXME: Pass opcode to is_inst_imm */
10207 /* Use the immediate opcodes if possible */
10208 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10211 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10212 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10213 /* Keep emulated opcodes which are optimized away later */
10214 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10215 imm_opcode = mono_op_to_op_imm (ins->opcode);
10218 if (imm_opcode != -1) {
10219 ins->opcode = imm_opcode;
10220 if (sp [1]->opcode == OP_I8CONST) {
10221 #if SIZEOF_REGISTER == 8
10222 ins->inst_imm = sp [1]->inst_l;
10224 ins->inst_ls_word = sp [1]->inst_ls_word;
10225 ins->inst_ms_word = sp [1]->inst_ms_word;
10229 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10232 /* Might be followed by an instruction added by add_widen_op */
10233 if (sp [1]->next == NULL)
10234 NULLIFY_INS (sp [1]);
10237 MONO_ADD_INS ((cfg)->cbb, (ins));
10239 *sp++ = mono_decompose_opcode (cfg, ins);
10252 case CEE_CONV_OVF_I8:
10253 case CEE_CONV_OVF_U8:
10254 case CEE_CONV_R_UN:
10257 /* Special case this earlier so we have long constants in the IR */
10258 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10259 int data = sp [-1]->inst_c0;
10260 sp [-1]->opcode = OP_I8CONST;
10261 sp [-1]->type = STACK_I8;
10262 #if SIZEOF_REGISTER == 8
10263 if ((*ip) == CEE_CONV_U8)
10264 sp [-1]->inst_c0 = (guint32)data;
10266 sp [-1]->inst_c0 = data;
10268 sp [-1]->inst_ls_word = data;
10269 if ((*ip) == CEE_CONV_U8)
10270 sp [-1]->inst_ms_word = 0;
10272 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10274 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10281 case CEE_CONV_OVF_I4:
10282 case CEE_CONV_OVF_I1:
10283 case CEE_CONV_OVF_I2:
10284 case CEE_CONV_OVF_I:
10285 case CEE_CONV_OVF_U:
10288 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10289 ADD_UNOP (CEE_CONV_OVF_I8);
10296 case CEE_CONV_OVF_U1:
10297 case CEE_CONV_OVF_U2:
10298 case CEE_CONV_OVF_U4:
10301 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10302 ADD_UNOP (CEE_CONV_OVF_U8);
10309 case CEE_CONV_OVF_I1_UN:
10310 case CEE_CONV_OVF_I2_UN:
10311 case CEE_CONV_OVF_I4_UN:
10312 case CEE_CONV_OVF_I8_UN:
10313 case CEE_CONV_OVF_U1_UN:
10314 case CEE_CONV_OVF_U2_UN:
10315 case CEE_CONV_OVF_U4_UN:
10316 case CEE_CONV_OVF_U8_UN:
10317 case CEE_CONV_OVF_I_UN:
10318 case CEE_CONV_OVF_U_UN:
10325 CHECK_CFG_EXCEPTION;
10329 case CEE_ADD_OVF_UN:
10331 case CEE_MUL_OVF_UN:
10333 case CEE_SUB_OVF_UN:
10339 GSHAREDVT_FAILURE (*ip);
10342 token = read32 (ip + 1);
10343 klass = mini_get_class (method, token, generic_context);
10344 CHECK_TYPELOAD (klass);
10346 if (generic_class_is_reference_type (cfg, klass)) {
10347 MonoInst *store, *load;
10348 int dreg = alloc_ireg_ref (cfg);
10350 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10351 load->flags |= ins_flag;
10352 MONO_ADD_INS (cfg->cbb, load);
10354 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10355 store->flags |= ins_flag;
10356 MONO_ADD_INS (cfg->cbb, store);
10358 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10359 emit_write_barrier (cfg, sp [0], sp [1]);
10361 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10367 int loc_index = -1;
10373 token = read32 (ip + 1);
10374 klass = mini_get_class (method, token, generic_context);
10375 CHECK_TYPELOAD (klass);
10377 /* Optimize the common ldobj+stloc combination */
10380 loc_index = ip [6];
10387 loc_index = ip [5] - CEE_STLOC_0;
10394 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10395 CHECK_LOCAL (loc_index);
10397 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10398 ins->dreg = cfg->locals [loc_index]->dreg;
10399 ins->flags |= ins_flag;
10402 if (ins_flag & MONO_INST_VOLATILE) {
10403 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10404 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10410 /* Optimize the ldobj+stobj combination */
10411 /* The reference case ends up being a load+store anyway */
10412 /* Skip this if the operation is volatile. */
10413 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10418 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10425 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10426 ins->flags |= ins_flag;
10429 if (ins_flag & MONO_INST_VOLATILE) {
10430 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10431 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10440 CHECK_STACK_OVF (1);
10442 n = read32 (ip + 1);
10444 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10445 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10446 ins->type = STACK_OBJ;
10449 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10450 MonoInst *iargs [1];
10451 char *str = mono_method_get_wrapper_data (method, n);
10453 if (cfg->compile_aot)
10454 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10456 EMIT_NEW_PCONST (cfg, iargs [0], str);
10457 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10459 if (cfg->opt & MONO_OPT_SHARED) {
10460 MonoInst *iargs [3];
10462 if (cfg->compile_aot) {
10463 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10465 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10466 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10467 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10468 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10469 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10471 if (cfg->cbb->out_of_line) {
10472 MonoInst *iargs [2];
10474 if (image == mono_defaults.corlib) {
10476 * Avoid relocations in AOT and save some space by using a
10477 * version of helper_ldstr specialized to mscorlib.
10479 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10480 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10482 /* Avoid creating the string object */
10483 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10484 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10485 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10489 if (cfg->compile_aot) {
10490 NEW_LDSTRCONST (cfg, ins, image, n);
10492 MONO_ADD_INS (cfg->cbb, ins);
10495 NEW_PCONST (cfg, ins, NULL);
10496 ins->type = STACK_OBJ;
10497 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10499 OUT_OF_MEMORY_FAILURE;
10502 MONO_ADD_INS (cfg->cbb, ins);
10511 MonoInst *iargs [2];
10512 MonoMethodSignature *fsig;
10515 MonoInst *vtable_arg = NULL;
10518 token = read32 (ip + 1);
10519 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10520 if (!cmethod || mono_loader_get_last_error ())
10522 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10525 mono_save_token_info (cfg, image, token, cmethod);
10527 if (!mono_class_init (cmethod->klass))
10528 TYPE_LOAD_ERROR (cmethod->klass);
10530 context_used = mini_method_check_context_used (cfg, cmethod);
10532 if (mono_security_core_clr_enabled ())
10533 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10535 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10536 emit_class_init (cfg, cmethod->klass);
10537 CHECK_TYPELOAD (cmethod->klass);
10541 if (cfg->gsharedvt) {
10542 if (mini_is_gsharedvt_variable_signature (sig))
10543 GSHAREDVT_FAILURE (*ip);
10547 n = fsig->param_count;
10551 * Generate smaller code for the common newobj <exception> instruction in
10552 * argument checking code.
10554 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10555 is_exception_class (cmethod->klass) && n <= 2 &&
10556 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10557 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10558 MonoInst *iargs [3];
10562 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10565 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10568 iargs [1] = sp [0];
10569 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10572 iargs [1] = sp [0];
10573 iargs [2] = sp [1];
10574 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10577 g_assert_not_reached ();
10585 /* move the args to allow room for 'this' in the first position */
10591 /* check_call_signature () requires sp[0] to be set */
10592 this_ins.type = STACK_OBJ;
10593 sp [0] = &this_ins;
10594 if (check_call_signature (cfg, fsig, sp))
10599 if (mini_class_is_system_array (cmethod->klass)) {
10600 *sp = emit_get_rgctx_method (cfg, context_used,
10601 cmethod, MONO_RGCTX_INFO_METHOD);
10603 /* Avoid varargs in the common case */
10604 if (fsig->param_count == 1)
10605 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10606 else if (fsig->param_count == 2)
10607 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10608 else if (fsig->param_count == 3)
10609 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10610 else if (fsig->param_count == 4)
10611 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10613 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10614 } else if (cmethod->string_ctor) {
10615 g_assert (!context_used);
10616 g_assert (!vtable_arg);
10617 /* we simply pass a null pointer */
10618 EMIT_NEW_PCONST (cfg, *sp, NULL);
10619 /* now call the string ctor */
10620 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10622 if (cmethod->klass->valuetype) {
10623 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10624 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10625 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10630 * The code generated by mini_emit_virtual_call () expects
10631 * iargs [0] to be a boxed instance, but luckily the vcall
10632 * will be transformed into a normal call there.
10634 } else if (context_used) {
10635 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10638 MonoVTable *vtable = NULL;
10640 if (!cfg->compile_aot)
10641 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10642 CHECK_TYPELOAD (cmethod->klass);
10645 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10646 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10647 * As a workaround, we call class cctors before allocating objects.
10649 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10650 emit_class_init (cfg, cmethod->klass);
10651 if (cfg->verbose_level > 2)
10652 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10653 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10656 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10659 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10662 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10664 /* Now call the actual ctor */
10665 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10666 CHECK_CFG_EXCEPTION;
10669 if (alloc == NULL) {
10671 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10672 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10680 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10681 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10684 case CEE_CASTCLASS:
10688 token = read32 (ip + 1);
10689 klass = mini_get_class (method, token, generic_context);
10690 CHECK_TYPELOAD (klass);
10691 if (sp [0]->type != STACK_OBJ)
10694 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10695 CHECK_CFG_EXCEPTION;
10704 token = read32 (ip + 1);
10705 klass = mini_get_class (method, token, generic_context);
10706 CHECK_TYPELOAD (klass);
10707 if (sp [0]->type != STACK_OBJ)
10710 context_used = mini_class_check_context_used (cfg, klass);
10712 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10713 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10714 MonoInst *args [3];
10721 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10724 if (cfg->compile_aot) {
10725 idx = get_castclass_cache_idx (cfg);
10726 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10728 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10731 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10734 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10735 MonoMethod *mono_isinst;
10736 MonoInst *iargs [1];
10739 mono_isinst = mono_marshal_get_isinst (klass);
10740 iargs [0] = sp [0];
10742 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10743 iargs, ip, cfg->real_offset, TRUE);
10744 CHECK_CFG_EXCEPTION;
10745 g_assert (costs > 0);
10748 cfg->real_offset += 5;
10752 inline_costs += costs;
10755 ins = handle_isinst (cfg, klass, *sp, context_used);
10756 CHECK_CFG_EXCEPTION;
10762 case CEE_UNBOX_ANY: {
10763 MonoInst *res, *addr;
10768 token = read32 (ip + 1);
10769 klass = mini_get_class (method, token, generic_context);
10770 CHECK_TYPELOAD (klass);
10772 mono_save_token_info (cfg, image, token, klass);
10774 context_used = mini_class_check_context_used (cfg, klass);
10776 if (mini_is_gsharedvt_klass (klass)) {
10777 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10779 } else if (generic_class_is_reference_type (cfg, klass)) {
10780 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10781 CHECK_CFG_EXCEPTION;
10782 } else if (mono_class_is_nullable (klass)) {
10783 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10785 addr = handle_unbox (cfg, klass, sp, context_used);
10787 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10798 MonoClass *enum_class;
10799 MonoMethod *has_flag;
10805 token = read32 (ip + 1);
10806 klass = mini_get_class (method, token, generic_context);
10807 CHECK_TYPELOAD (klass);
10809 mono_save_token_info (cfg, image, token, klass);
10811 context_used = mini_class_check_context_used (cfg, klass);
10813 if (generic_class_is_reference_type (cfg, klass)) {
10819 if (klass == mono_defaults.void_class)
10821 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10823 /* frequent check in generic code: box (struct), brtrue */
10828 * <push int/long ptr>
10831 * constrained. MyFlags
10832	 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10834 * If we find this sequence and the operand types on box and constrained
10835 * are equal, we can emit a specialized instruction sequence instead of
10836 * the very slow HasFlag () call.
10838 if ((cfg->opt & MONO_OPT_INTRINS) &&
10839 /* Cheap checks first. */
10840 ip + 5 + 6 + 5 < end &&
10841 ip [5] == CEE_PREFIX1 &&
10842 ip [6] == CEE_CONSTRAINED_ &&
10843 ip [11] == CEE_CALLVIRT &&
10844 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10845 mono_class_is_enum (klass) &&
10846 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10847 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10848 has_flag->klass == mono_defaults.enum_class &&
10849 !strcmp (has_flag->name, "HasFlag") &&
10850 has_flag->signature->hasthis &&
10851 has_flag->signature->param_count == 1) {
10852 CHECK_TYPELOAD (enum_class);
10854 if (enum_class == klass) {
10855 MonoInst *enum_this, *enum_flag;
10860 enum_this = sp [0];
10861 enum_flag = sp [1];
10863 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10868 // FIXME: LLVM can't handle the inconsistent bb linking
10869 if (!mono_class_is_nullable (klass) &&
10870 !mini_is_gsharedvt_klass (klass) &&
10871 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10872 (ip [5] == CEE_BRTRUE ||
10873 ip [5] == CEE_BRTRUE_S ||
10874 ip [5] == CEE_BRFALSE ||
10875 ip [5] == CEE_BRFALSE_S)) {
10876 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10878 MonoBasicBlock *true_bb, *false_bb;
10882 if (cfg->verbose_level > 3) {
10883 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10884 printf ("<box+brtrue opt>\n");
10889 case CEE_BRFALSE_S:
10892 target = ip + 1 + (signed char)(*ip);
10899 target = ip + 4 + (gint)(read32 (ip));
10903 g_assert_not_reached ();
10907 * We need to link both bblocks, since it is needed for handling stack
10908 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10909 * Branching to only one of them would lead to inconsistencies, so
10910 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10912 GET_BBLOCK (cfg, true_bb, target);
10913 GET_BBLOCK (cfg, false_bb, ip);
10915 mono_link_bblock (cfg, cfg->cbb, true_bb);
10916 mono_link_bblock (cfg, cfg->cbb, false_bb);
10918 if (sp != stack_start) {
10919 handle_stack_args (cfg, stack_start, sp - stack_start);
10921 CHECK_UNVERIFIABLE (cfg);
10924 if (COMPILE_LLVM (cfg)) {
10925 dreg = alloc_ireg (cfg);
10926 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10929 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10931 /* The JIT can't eliminate the iconst+compare */
10932 MONO_INST_NEW (cfg, ins, OP_BR);
10933 ins->inst_target_bb = is_true ? true_bb : false_bb;
10934 MONO_ADD_INS (cfg->cbb, ins);
10937 start_new_bblock = 1;
10941 *sp++ = handle_box (cfg, val, klass, context_used);
10943 CHECK_CFG_EXCEPTION;
10952 token = read32 (ip + 1);
10953 klass = mini_get_class (method, token, generic_context);
10954 CHECK_TYPELOAD (klass);
10956 mono_save_token_info (cfg, image, token, klass);
10958 context_used = mini_class_check_context_used (cfg, klass);
10960 if (mono_class_is_nullable (klass)) {
10963 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10964 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10968 ins = handle_unbox (cfg, klass, sp, context_used);
10981 MonoClassField *field;
10982 #ifndef DISABLE_REMOTING
10986 gboolean is_instance;
10988 gpointer addr = NULL;
10989 gboolean is_special_static;
10991 MonoInst *store_val = NULL;
10992 MonoInst *thread_ins;
10995 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10997 if (op == CEE_STFLD) {
11000 store_val = sp [1];
11005 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11007 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11010 if (op == CEE_STSFLD) {
11013 store_val = sp [0];
11018 token = read32 (ip + 1);
11019 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11020 field = mono_method_get_wrapper_data (method, token);
11021 klass = field->parent;
11024 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11027 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11028 FIELD_ACCESS_FAILURE (method, field);
11029 mono_class_init (klass);
11031	 /* if the class is Critical then transparent code cannot access its fields */
11032 if (!is_instance && mono_security_core_clr_enabled ())
11033 ensure_method_is_allowed_to_access_field (cfg, method, field);
11035	 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11036 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11037 if (mono_security_core_clr_enabled ())
11038 ensure_method_is_allowed_to_access_field (cfg, method, field);
11041 ftype = mono_field_get_type (field);
11044 * LDFLD etc. is usable on static fields as well, so convert those cases to
11047 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11059 g_assert_not_reached ();
11061 is_instance = FALSE;
11064 context_used = mini_class_check_context_used (cfg, klass);
11066 /* INSTANCE CASE */
11068 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11069 if (op == CEE_STFLD) {
11070 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11072 #ifndef DISABLE_REMOTING
11073 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11074 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11075 MonoInst *iargs [5];
11077 GSHAREDVT_FAILURE (op);
11079 iargs [0] = sp [0];
11080 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11081 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11082 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11084 iargs [4] = sp [1];
11086 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11087 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11088 iargs, ip, cfg->real_offset, TRUE);
11089 CHECK_CFG_EXCEPTION;
11090 g_assert (costs > 0);
11092 cfg->real_offset += 5;
11094 inline_costs += costs;
11096 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11103 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11105 if (mini_is_gsharedvt_klass (klass)) {
11106 MonoInst *offset_ins;
11108 context_used = mini_class_check_context_used (cfg, klass);
11110 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11111 dreg = alloc_ireg_mp (cfg);
11112 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11113 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11114 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11116 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11118 if (sp [0]->opcode != OP_LDADDR)
11119 store->flags |= MONO_INST_FAULT;
11121 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11122 /* insert call to write barrier */
11126 dreg = alloc_ireg_mp (cfg);
11127 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11128 emit_write_barrier (cfg, ptr, sp [1]);
11131 store->flags |= ins_flag;
11138 #ifndef DISABLE_REMOTING
11139 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11140 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11141 MonoInst *iargs [4];
11143 GSHAREDVT_FAILURE (op);
11145 iargs [0] = sp [0];
11146 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11147 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11148 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11149 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11150 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11151 iargs, ip, cfg->real_offset, TRUE);
11152 CHECK_CFG_EXCEPTION;
11153 g_assert (costs > 0);
11155 cfg->real_offset += 5;
11159 inline_costs += costs;
11161 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11167 if (sp [0]->type == STACK_VTYPE) {
11170 /* Have to compute the address of the variable */
11172 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11174 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11176 g_assert (var->klass == klass);
11178 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11182 if (op == CEE_LDFLDA) {
11183 if (sp [0]->type == STACK_OBJ) {
11184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11185 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11188 dreg = alloc_ireg_mp (cfg);
11190 if (mini_is_gsharedvt_klass (klass)) {
11191 MonoInst *offset_ins;
11193 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11194 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11196 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11198 ins->klass = mono_class_from_mono_type (field->type);
11199 ins->type = STACK_MP;
11204 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11206 if (mini_is_gsharedvt_klass (klass)) {
11207 MonoInst *offset_ins;
11209 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11210 dreg = alloc_ireg_mp (cfg);
11211 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11212 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11214 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11216 load->flags |= ins_flag;
11217 if (sp [0]->opcode != OP_LDADDR)
11218 load->flags |= MONO_INST_FAULT;
11230 context_used = mini_class_check_context_used (cfg, klass);
11232 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11235 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11236 * to be called here.
11238 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11239 mono_class_vtable (cfg->domain, klass);
11240 CHECK_TYPELOAD (klass);
11242 mono_domain_lock (cfg->domain);
11243 if (cfg->domain->special_static_fields)
11244 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11245 mono_domain_unlock (cfg->domain);
11247 is_special_static = mono_class_field_is_special_static (field);
11249 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11250 thread_ins = mono_get_thread_intrinsic (cfg);
11254 /* Generate IR to compute the field address */
11255 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11257 * Fast access to TLS data
11258 * Inline version of get_thread_static_data () in
11262 int idx, static_data_reg, array_reg, dreg;
11264 GSHAREDVT_FAILURE (op);
11266 MONO_ADD_INS (cfg->cbb, thread_ins);
11267 static_data_reg = alloc_ireg (cfg);
11268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11270 if (cfg->compile_aot) {
11271 int offset_reg, offset2_reg, idx_reg;
11273 /* For TLS variables, this will return the TLS offset */
11274 EMIT_NEW_SFLDACONST (cfg, ins, field);
11275 offset_reg = ins->dreg;
11276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11277 idx_reg = alloc_ireg (cfg);
11278 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11279 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11280 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11281 array_reg = alloc_ireg (cfg);
11282 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11283 offset2_reg = alloc_ireg (cfg);
11284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11286 dreg = alloc_ireg (cfg);
11287 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11289 offset = (gsize)addr & 0x7fffffff;
11290 idx = offset & 0x3f;
11292 array_reg = alloc_ireg (cfg);
11293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11294 dreg = alloc_ireg (cfg);
11295 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11297 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11298 (cfg->compile_aot && is_special_static) ||
11299 (context_used && is_special_static)) {
11300 MonoInst *iargs [2];
11302 g_assert (field->parent);
11303 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11304 if (context_used) {
11305 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11306 field, MONO_RGCTX_INFO_CLASS_FIELD);
11308 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11310 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11311 } else if (context_used) {
11312 MonoInst *static_data;
11315 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11316 method->klass->name_space, method->klass->name, method->name,
11317 depth, field->offset);
11320 if (mono_class_needs_cctor_run (klass, method))
11321 emit_class_init (cfg, klass);
11324 * The pointer we're computing here is
11326 * super_info.static_data + field->offset
11328 static_data = emit_get_rgctx_klass (cfg, context_used,
11329 klass, MONO_RGCTX_INFO_STATIC_DATA);
11331 if (mini_is_gsharedvt_klass (klass)) {
11332 MonoInst *offset_ins;
11334 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11335 dreg = alloc_ireg_mp (cfg);
11336 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11337 } else if (field->offset == 0) {
11340 int addr_reg = mono_alloc_preg (cfg);
11341 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11343 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11344 MonoInst *iargs [2];
11346 g_assert (field->parent);
11347 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11348 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11349 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11351 MonoVTable *vtable = NULL;
11353 if (!cfg->compile_aot)
11354 vtable = mono_class_vtable (cfg->domain, klass);
11355 CHECK_TYPELOAD (klass);
11358 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11359 if (!(g_slist_find (class_inits, klass))) {
11360 emit_class_init (cfg, klass);
11361 if (cfg->verbose_level > 2)
11362 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11363 class_inits = g_slist_prepend (class_inits, klass);
11366 if (cfg->run_cctors) {
11368 /* This makes it so that inlining cannot trigger */
11369 /* .cctors: too many apps depend on them */
11370 /* running with a specific order... */
11372 if (! vtable->initialized)
11373 INLINE_FAILURE ("class init");
11374 ex = mono_runtime_class_init_full (vtable, FALSE);
11376 set_exception_object (cfg, ex);
11377 goto exception_exit;
11381 if (cfg->compile_aot)
11382 EMIT_NEW_SFLDACONST (cfg, ins, field);
11385 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11387 EMIT_NEW_PCONST (cfg, ins, addr);
11390 MonoInst *iargs [1];
11391 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11392 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11396 /* Generate IR to do the actual load/store operation */
11398 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11399 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11400 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11403 if (op == CEE_LDSFLDA) {
11404 ins->klass = mono_class_from_mono_type (ftype);
11405 ins->type = STACK_PTR;
11407 } else if (op == CEE_STSFLD) {
11410 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11411 store->flags |= ins_flag;
11413 gboolean is_const = FALSE;
11414 MonoVTable *vtable = NULL;
11415 gpointer addr = NULL;
11417 if (!context_used) {
11418 vtable = mono_class_vtable (cfg->domain, klass);
11419 CHECK_TYPELOAD (klass);
11421 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11422 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11423 int ro_type = ftype->type;
11425 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11426 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11427 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11430 GSHAREDVT_FAILURE (op);
11432 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11435 case MONO_TYPE_BOOLEAN:
11437 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11441 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11444 case MONO_TYPE_CHAR:
11446 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11450 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11455 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11459 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11464 case MONO_TYPE_PTR:
11465 case MONO_TYPE_FNPTR:
11466 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11467 type_to_eval_stack_type ((cfg), field->type, *sp);
11470 case MONO_TYPE_STRING:
11471 case MONO_TYPE_OBJECT:
11472 case MONO_TYPE_CLASS:
11473 case MONO_TYPE_SZARRAY:
11474 case MONO_TYPE_ARRAY:
11475 if (!mono_gc_is_moving ()) {
11476 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11477 type_to_eval_stack_type ((cfg), field->type, *sp);
11485 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11490 case MONO_TYPE_VALUETYPE:
11500 CHECK_STACK_OVF (1);
11502 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11503 load->flags |= ins_flag;
11509 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11510 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11511 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11522 token = read32 (ip + 1);
11523 klass = mini_get_class (method, token, generic_context);
11524 CHECK_TYPELOAD (klass);
11525 if (ins_flag & MONO_INST_VOLATILE) {
11526 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11527 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11529 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11530 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11531 ins->flags |= ins_flag;
11532 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11533 generic_class_is_reference_type (cfg, klass)) {
11534 /* insert call to write barrier */
11535 emit_write_barrier (cfg, sp [0], sp [1]);
11547 const char *data_ptr;
11549 guint32 field_token;
11555 token = read32 (ip + 1);
11557 klass = mini_get_class (method, token, generic_context);
11558 CHECK_TYPELOAD (klass);
11560 context_used = mini_class_check_context_used (cfg, klass);
11562 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11563 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11564 ins->sreg1 = sp [0]->dreg;
11565 ins->type = STACK_I4;
11566 ins->dreg = alloc_ireg (cfg);
11567 MONO_ADD_INS (cfg->cbb, ins);
11568 *sp = mono_decompose_opcode (cfg, ins);
11571 if (context_used) {
11572 MonoInst *args [3];
11573 MonoClass *array_class = mono_array_class_get (klass, 1);
11574 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11576 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11579 args [0] = emit_get_rgctx_klass (cfg, context_used,
11580 array_class, MONO_RGCTX_INFO_VTABLE);
11585 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11587 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11589 if (cfg->opt & MONO_OPT_SHARED) {
11590 /* Decompose now to avoid problems with references to the domainvar */
11591 MonoInst *iargs [3];
11593 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11594 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11595 iargs [2] = sp [0];
11597 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11599 /* Decompose later since it is needed by abcrem */
11600 MonoClass *array_type = mono_array_class_get (klass, 1);
11601 mono_class_vtable (cfg->domain, array_type);
11602 CHECK_TYPELOAD (array_type);
11604 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11605 ins->dreg = alloc_ireg_ref (cfg);
11606 ins->sreg1 = sp [0]->dreg;
11607 ins->inst_newa_class = klass;
11608 ins->type = STACK_OBJ;
11609 ins->klass = array_type;
11610 MONO_ADD_INS (cfg->cbb, ins);
11611 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11612 cfg->cbb->has_array_access = TRUE;
11614 /* Needed so mono_emit_load_get_addr () gets called */
11615 mono_get_got_var (cfg);
11625 * we inline/optimize the initialization sequence if possible.
11626 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11627 * for small sizes open code the memcpy
11628 * ensure the rva field is big enough
11630 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11631 MonoMethod *memcpy_method = get_memcpy_method ();
11632 MonoInst *iargs [3];
11633 int add_reg = alloc_ireg_mp (cfg);
11635 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11636 if (cfg->compile_aot) {
11637 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11639 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11641 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11642 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11651 if (sp [0]->type != STACK_OBJ)
11654 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11655 ins->dreg = alloc_preg (cfg);
11656 ins->sreg1 = sp [0]->dreg;
11657 ins->type = STACK_I4;
11658 /* This flag will be inherited by the decomposition */
11659 ins->flags |= MONO_INST_FAULT;
11660 MONO_ADD_INS (cfg->cbb, ins);
11661 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11662 cfg->cbb->has_array_access = TRUE;
11670 if (sp [0]->type != STACK_OBJ)
11673 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11675 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11676 CHECK_TYPELOAD (klass);
11677 /* we need to make sure that this array is exactly the type it needs
11678 * to be for correctness. the wrappers are lax with their usage
11679 * so we need to ignore them here
11681 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11682 MonoClass *array_class = mono_array_class_get (klass, 1);
11683 mini_emit_check_array_type (cfg, sp [0], array_class);
11684 CHECK_TYPELOAD (array_class);
11688 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11693 case CEE_LDELEM_I1:
11694 case CEE_LDELEM_U1:
11695 case CEE_LDELEM_I2:
11696 case CEE_LDELEM_U2:
11697 case CEE_LDELEM_I4:
11698 case CEE_LDELEM_U4:
11699 case CEE_LDELEM_I8:
11701 case CEE_LDELEM_R4:
11702 case CEE_LDELEM_R8:
11703 case CEE_LDELEM_REF: {
11709 if (*ip == CEE_LDELEM) {
11711 token = read32 (ip + 1);
11712 klass = mini_get_class (method, token, generic_context);
11713 CHECK_TYPELOAD (klass);
11714 mono_class_init (klass);
11717 klass = array_access_to_klass (*ip);
11719 if (sp [0]->type != STACK_OBJ)
11722 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11724 if (mini_is_gsharedvt_variable_klass (klass)) {
11725 // FIXME-VT: OP_ICONST optimization
11726 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11727 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11728 ins->opcode = OP_LOADV_MEMBASE;
11729 } else if (sp [1]->opcode == OP_ICONST) {
11730 int array_reg = sp [0]->dreg;
11731 int index_reg = sp [1]->dreg;
11732 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11734 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11735 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11737 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11741 if (*ip == CEE_LDELEM)
11748 case CEE_STELEM_I1:
11749 case CEE_STELEM_I2:
11750 case CEE_STELEM_I4:
11751 case CEE_STELEM_I8:
11752 case CEE_STELEM_R4:
11753 case CEE_STELEM_R8:
11754 case CEE_STELEM_REF:
11759 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11761 if (*ip == CEE_STELEM) {
11763 token = read32 (ip + 1);
11764 klass = mini_get_class (method, token, generic_context);
11765 CHECK_TYPELOAD (klass);
11766 mono_class_init (klass);
11769 klass = array_access_to_klass (*ip);
11771 if (sp [0]->type != STACK_OBJ)
11774 emit_array_store (cfg, klass, sp, TRUE);
11776 if (*ip == CEE_STELEM)
11783 case CEE_CKFINITE: {
11787 if (cfg->llvm_only) {
11788 MonoInst *iargs [1];
11790 iargs [0] = sp [0];
11791 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11793 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11794 ins->sreg1 = sp [0]->dreg;
11795 ins->dreg = alloc_freg (cfg);
11796 ins->type = STACK_R8;
11797 MONO_ADD_INS (cfg->cbb, ins);
11799 *sp++ = mono_decompose_opcode (cfg, ins);
11805 case CEE_REFANYVAL: {
11806 MonoInst *src_var, *src;
11808 int klass_reg = alloc_preg (cfg);
11809 int dreg = alloc_preg (cfg);
11811 GSHAREDVT_FAILURE (*ip);
11814 MONO_INST_NEW (cfg, ins, *ip);
11817 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11818 CHECK_TYPELOAD (klass);
11820 context_used = mini_class_check_context_used (cfg, klass);
11823 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11825 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11826 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11829 if (context_used) {
11830 MonoInst *klass_ins;
11832 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11833 klass, MONO_RGCTX_INFO_KLASS);
11836 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11837 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11839 mini_emit_class_check (cfg, klass_reg, klass);
11841 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11842 ins->type = STACK_MP;
11843 ins->klass = klass;
11848 case CEE_MKREFANY: {
11849 MonoInst *loc, *addr;
11851 GSHAREDVT_FAILURE (*ip);
11854 MONO_INST_NEW (cfg, ins, *ip);
11857 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11858 CHECK_TYPELOAD (klass);
11860 context_used = mini_class_check_context_used (cfg, klass);
11862 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11863 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11865 if (context_used) {
11866 MonoInst *const_ins;
11867 int type_reg = alloc_preg (cfg);
11869 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11873 } else if (cfg->compile_aot) {
11874 int const_reg = alloc_preg (cfg);
11875 int type_reg = alloc_preg (cfg);
11877 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11879 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11882 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11883 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11887 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11888 ins->type = STACK_VTYPE;
11889 ins->klass = mono_defaults.typed_reference_class;
11894 case CEE_LDTOKEN: {
11896 MonoClass *handle_class;
11898 CHECK_STACK_OVF (1);
11901 n = read32 (ip + 1);
11903 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11904 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11905 handle = mono_method_get_wrapper_data (method, n);
11906 handle_class = mono_method_get_wrapper_data (method, n + 1);
11907 if (handle_class == mono_defaults.typehandle_class)
11908 handle = &((MonoClass*)handle)->byval_arg;
11911 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11916 mono_class_init (handle_class);
11917 if (cfg->gshared) {
11918 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11919 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11920 /* This case handles ldtoken
11921 of an open type, like for
11924 } else if (handle_class == mono_defaults.typehandle_class) {
11925 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11926 } else if (handle_class == mono_defaults.fieldhandle_class)
11927 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11928 else if (handle_class == mono_defaults.methodhandle_class)
11929 context_used = mini_method_check_context_used (cfg, handle);
11931 g_assert_not_reached ();
11934 if ((cfg->opt & MONO_OPT_SHARED) &&
11935 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11936 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11937 MonoInst *addr, *vtvar, *iargs [3];
11938 int method_context_used;
11940 method_context_used = mini_method_check_context_used (cfg, method);
11942 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11944 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11945 EMIT_NEW_ICONST (cfg, iargs [1], n);
11946 if (method_context_used) {
11947 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11948 method, MONO_RGCTX_INFO_METHOD);
11949 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11951 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11952 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11954 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11958 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11960 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11961 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11962 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11963 (cmethod->klass == mono_defaults.systemtype_class) &&
11964 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11965 MonoClass *tclass = mono_class_from_mono_type (handle);
11967 mono_class_init (tclass);
11968 if (context_used) {
11969 ins = emit_get_rgctx_klass (cfg, context_used,
11970 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11971 } else if (cfg->compile_aot) {
11972 if (method->wrapper_type) {
11973 mono_error_init (&error); //got to do it since there are multiple conditionals below
11974 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11975 /* Special case for static synchronized wrappers */
11976 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11978 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11979 /* FIXME: n is not a normal token */
11981 EMIT_NEW_PCONST (cfg, ins, NULL);
11984 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11987 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11989 ins->type = STACK_OBJ;
11990 ins->klass = cmethod->klass;
11993 MonoInst *addr, *vtvar;
11995 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11997 if (context_used) {
11998 if (handle_class == mono_defaults.typehandle_class) {
11999 ins = emit_get_rgctx_klass (cfg, context_used,
12000 mono_class_from_mono_type (handle),
12001 MONO_RGCTX_INFO_TYPE);
12002 } else if (handle_class == mono_defaults.methodhandle_class) {
12003 ins = emit_get_rgctx_method (cfg, context_used,
12004 handle, MONO_RGCTX_INFO_METHOD);
12005 } else if (handle_class == mono_defaults.fieldhandle_class) {
12006 ins = emit_get_rgctx_field (cfg, context_used,
12007 handle, MONO_RGCTX_INFO_CLASS_FIELD);
12009 g_assert_not_reached ();
12011 } else if (cfg->compile_aot) {
12012 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12014 EMIT_NEW_PCONST (cfg, ins, handle);
12016 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12017 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12018 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12028 MONO_INST_NEW (cfg, ins, OP_THROW);
12030 ins->sreg1 = sp [0]->dreg;
12032 cfg->cbb->out_of_line = TRUE;
12033 MONO_ADD_INS (cfg->cbb, ins);
12034 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12035 MONO_ADD_INS (cfg->cbb, ins);
12038 link_bblock (cfg, cfg->cbb, end_bblock);
12039 start_new_bblock = 1;
12040 /* This can complicate code generation for llvm since the return value might not be defined */
12041 if (COMPILE_LLVM (cfg))
12042 INLINE_FAILURE ("throw");
12044 case CEE_ENDFINALLY:
12045 /* mono_save_seq_point_info () depends on this */
12046 if (sp != stack_start)
12047 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12048 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12049 MONO_ADD_INS (cfg->cbb, ins);
12051 start_new_bblock = 1;
12054 * Control will leave the method so empty the stack, otherwise
12055 * the next basic block will start with a nonempty stack.
12057 while (sp != stack_start) {
12062 case CEE_LEAVE_S: {
12065 if (*ip == CEE_LEAVE) {
12067 target = ip + 5 + (gint32)read32(ip + 1);
12070 target = ip + 2 + (signed char)(ip [1]);
12073 /* empty the stack */
12074 while (sp != stack_start) {
12079 * If this leave statement is in a catch block, check for a
12080 * pending exception, and rethrow it if necessary.
12081 * We avoid doing this in runtime invoke wrappers, since those are called
12082 * by native code which expects the wrapper to catch all exceptions.
12084 for (i = 0; i < header->num_clauses; ++i) {
12085 MonoExceptionClause *clause = &header->clauses [i];
12088 * Use <= in the final comparison to handle clauses with multiple
12089 * leave statements, like in bug #78024.
12090 * The ordering of the exception clauses guarantees that we find the
12091 * innermost clause.
12093 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12095 MonoBasicBlock *dont_throw;
12100 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12103 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12105 NEW_BBLOCK (cfg, dont_throw);
12108 * Currently, we always rethrow the abort exception, despite the
12109 * fact that this is not correct. See thread6.cs for an example.
12110 * But propagating the abort exception is more important than
12111 * getting the semantics right.
12113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12115 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12117 MONO_START_BB (cfg, dont_throw);
12122 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12125 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12127 MonoExceptionClause *clause;
12129 for (tmp = handlers; tmp; tmp = tmp->next) {
12130 clause = tmp->data;
12131 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12133 link_bblock (cfg, cfg->cbb, tblock);
12134 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12135 ins->inst_target_bb = tblock;
12136 ins->inst_eh_block = clause;
12137 MONO_ADD_INS (cfg->cbb, ins);
12138 cfg->cbb->has_call_handler = 1;
12139 if (COMPILE_LLVM (cfg)) {
12140 MonoBasicBlock *target_bb;
12143 * Link the finally bblock with the target, since it will
12144 * conceptually branch there.
12145 * FIXME: Have to link the bblock containing the endfinally.
12147 GET_BBLOCK (cfg, target_bb, target);
12148 link_bblock (cfg, tblock, target_bb);
12151 g_list_free (handlers);
12154 MONO_INST_NEW (cfg, ins, OP_BR);
12155 MONO_ADD_INS (cfg->cbb, ins);
12156 GET_BBLOCK (cfg, tblock, target);
12157 link_bblock (cfg, cfg->cbb, tblock);
12158 ins->inst_target_bb = tblock;
12160 start_new_bblock = 1;
12162 if (*ip == CEE_LEAVE)
12171 * Mono specific opcodes
12173 case MONO_CUSTOM_PREFIX: {
12175 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12179 case CEE_MONO_ICALL: {
12181 MonoJitICallInfo *info;
12183 token = read32 (ip + 2);
12184 func = mono_method_get_wrapper_data (method, token);
12185 info = mono_find_jit_icall_by_addr (func);
12187 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12190 CHECK_STACK (info->sig->param_count);
12191 sp -= info->sig->param_count;
12193 ins = mono_emit_jit_icall (cfg, info->func, sp);
12194 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12198 inline_costs += 10 * num_calls++;
12202 case CEE_MONO_LDPTR_CARD_TABLE:
12203 case CEE_MONO_LDPTR_NURSERY_START:
12204 case CEE_MONO_LDPTR_NURSERY_BITS:
12205 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12206 CHECK_STACK_OVF (1);
12209 case CEE_MONO_LDPTR_CARD_TABLE:
12210 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
12212 case CEE_MONO_LDPTR_NURSERY_START:
12213 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_START);
12215 case CEE_MONO_LDPTR_NURSERY_BITS:
12216 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_GC_NURSERY_BITS);
12218 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12219 emit_runtime_constant (cfg, &ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG);
12225 inline_costs += 10 * num_calls++;
12228 case CEE_MONO_LDPTR: {
12231 CHECK_STACK_OVF (1);
12233 token = read32 (ip + 2);
12235 ptr = mono_method_get_wrapper_data (method, token);
12236 EMIT_NEW_PCONST (cfg, ins, ptr);
12239 inline_costs += 10 * num_calls++;
12240 /* Can't embed random pointers into AOT code */
12244 case CEE_MONO_JIT_ICALL_ADDR: {
12245 MonoJitICallInfo *callinfo;
12248 CHECK_STACK_OVF (1);
12250 token = read32 (ip + 2);
12252 ptr = mono_method_get_wrapper_data (method, token);
12253 callinfo = mono_find_jit_icall_by_addr (ptr);
12254 g_assert (callinfo);
12255 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12258 inline_costs += 10 * num_calls++;
12261 case CEE_MONO_ICALL_ADDR: {
12262 MonoMethod *cmethod;
12265 CHECK_STACK_OVF (1);
12267 token = read32 (ip + 2);
12269 cmethod = mono_method_get_wrapper_data (method, token);
12271 if (cfg->compile_aot) {
12272 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12274 ptr = mono_lookup_internal_call (cmethod);
12276 EMIT_NEW_PCONST (cfg, ins, ptr);
12282 case CEE_MONO_VTADDR: {
12283 MonoInst *src_var, *src;
12289 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12290 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12295 case CEE_MONO_NEWOBJ: {
12296 MonoInst *iargs [2];
12298 CHECK_STACK_OVF (1);
12300 token = read32 (ip + 2);
12301 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12302 mono_class_init (klass);
12303 NEW_DOMAINCONST (cfg, iargs [0]);
12304 MONO_ADD_INS (cfg->cbb, iargs [0]);
12305 NEW_CLASSCONST (cfg, iargs [1], klass);
12306 MONO_ADD_INS (cfg->cbb, iargs [1]);
12307 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12309 inline_costs += 10 * num_calls++;
12312 case CEE_MONO_OBJADDR:
12315 MONO_INST_NEW (cfg, ins, OP_MOVE);
12316 ins->dreg = alloc_ireg_mp (cfg);
12317 ins->sreg1 = sp [0]->dreg;
12318 ins->type = STACK_MP;
12319 MONO_ADD_INS (cfg->cbb, ins);
12323 case CEE_MONO_LDNATIVEOBJ:
12325 * Similar to LDOBJ, but instead load the unmanaged
12326 * representation of the vtype to the stack.
12331 token = read32 (ip + 2);
12332 klass = mono_method_get_wrapper_data (method, token);
12333 g_assert (klass->valuetype);
12334 mono_class_init (klass);
12337 MonoInst *src, *dest, *temp;
12340 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12341 temp->backend.is_pinvoke = 1;
12342 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12343 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12345 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12346 dest->type = STACK_VTYPE;
12347 dest->klass = klass;
12353 case CEE_MONO_RETOBJ: {
12355 * Same as RET, but return the native representation of a vtype
12358 g_assert (cfg->ret);
12359 g_assert (mono_method_signature (method)->pinvoke);
12364 token = read32 (ip + 2);
12365 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12367 if (!cfg->vret_addr) {
12368 g_assert (cfg->ret_var_is_local);
12370 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12372 EMIT_NEW_RETLOADA (cfg, ins);
12374 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12376 if (sp != stack_start)
12379 MONO_INST_NEW (cfg, ins, OP_BR);
12380 ins->inst_target_bb = end_bblock;
12381 MONO_ADD_INS (cfg->cbb, ins);
12382 link_bblock (cfg, cfg->cbb, end_bblock);
12383 start_new_bblock = 1;
12387 case CEE_MONO_CISINST:
12388 case CEE_MONO_CCASTCLASS: {
12393 token = read32 (ip + 2);
12394 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12395 if (ip [1] == CEE_MONO_CISINST)
12396 ins = handle_cisinst (cfg, klass, sp [0]);
12398 ins = handle_ccastclass (cfg, klass, sp [0]);
12403 case CEE_MONO_SAVE_LMF:
12404 case CEE_MONO_RESTORE_LMF:
12407 case CEE_MONO_CLASSCONST:
12408 CHECK_STACK_OVF (1);
12410 token = read32 (ip + 2);
12411 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12414 inline_costs += 10 * num_calls++;
12416 case CEE_MONO_NOT_TAKEN:
12417 cfg->cbb->out_of_line = TRUE;
12420 case CEE_MONO_TLS: {
12423 CHECK_STACK_OVF (1);
12425 key = (gint32)read32 (ip + 2);
12426 g_assert (key < TLS_KEY_NUM);
12428 ins = mono_create_tls_get (cfg, key);
12430 if (cfg->compile_aot) {
12432 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12433 ins->dreg = alloc_preg (cfg);
12434 ins->type = STACK_PTR;
12436 g_assert_not_reached ();
12439 ins->type = STACK_PTR;
12440 MONO_ADD_INS (cfg->cbb, ins);
12445 case CEE_MONO_DYN_CALL: {
12446 MonoCallInst *call;
12448 /* It would be easier to call a trampoline, but that would put an
12449 * extra frame on the stack, confusing exception handling. So
12450 * implement it inline using an opcode for now.
12453 if (!cfg->dyn_call_var) {
12454 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12455 /* prevent it from being register allocated */
12456 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12459 /* Has to use a call inst since it local regalloc expects it */
12460 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12461 ins = (MonoInst*)call;
12463 ins->sreg1 = sp [0]->dreg;
12464 ins->sreg2 = sp [1]->dreg;
12465 MONO_ADD_INS (cfg->cbb, ins);
12467 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12470 inline_costs += 10 * num_calls++;
12474 case CEE_MONO_MEMORY_BARRIER: {
12476 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12480 case CEE_MONO_JIT_ATTACH: {
12481 MonoInst *args [16], *domain_ins;
12482 MonoInst *ad_ins, *jit_tls_ins;
12483 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12485 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12487 EMIT_NEW_PCONST (cfg, ins, NULL);
12488 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12490 ad_ins = mono_get_domain_intrinsic (cfg);
12491 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12493 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12494 NEW_BBLOCK (cfg, next_bb);
12495 NEW_BBLOCK (cfg, call_bb);
12497 if (cfg->compile_aot) {
12498 /* AOT code is only used in the root domain */
12499 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12501 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12503 MONO_ADD_INS (cfg->cbb, ad_ins);
12504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12507 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12512 MONO_START_BB (cfg, call_bb);
12515 if (cfg->compile_aot) {
12516 /* AOT code is only used in the root domain */
12517 EMIT_NEW_PCONST (cfg, args [0], NULL);
12519 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12521 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12522 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12525 MONO_START_BB (cfg, next_bb);
12529 case CEE_MONO_JIT_DETACH: {
12530 MonoInst *args [16];
12532 /* Restore the original domain */
12533 dreg = alloc_ireg (cfg);
12534 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12535 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12540 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12546 case CEE_PREFIX1: {
12549 case CEE_ARGLIST: {
12550 /* somewhat similar to LDTOKEN */
12551 MonoInst *addr, *vtvar;
12552 CHECK_STACK_OVF (1);
12553 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12555 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12556 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12558 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12559 ins->type = STACK_VTYPE;
12560 ins->klass = mono_defaults.argumenthandle_class;
12570 MonoInst *cmp, *arg1, *arg2;
12578 * The following transforms:
12579 * CEE_CEQ into OP_CEQ
12580 * CEE_CGT into OP_CGT
12581 * CEE_CGT_UN into OP_CGT_UN
12582 * CEE_CLT into OP_CLT
12583 * CEE_CLT_UN into OP_CLT_UN
12585 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12587 MONO_INST_NEW (cfg, ins, cmp->opcode);
12588 cmp->sreg1 = arg1->dreg;
12589 cmp->sreg2 = arg2->dreg;
12590 type_from_op (cfg, cmp, arg1, arg2);
12592 add_widen_op (cfg, cmp, &arg1, &arg2);
12593 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12594 cmp->opcode = OP_LCOMPARE;
12595 else if (arg1->type == STACK_R4)
12596 cmp->opcode = OP_RCOMPARE;
12597 else if (arg1->type == STACK_R8)
12598 cmp->opcode = OP_FCOMPARE;
12600 cmp->opcode = OP_ICOMPARE;
12601 MONO_ADD_INS (cfg->cbb, cmp);
12602 ins->type = STACK_I4;
12603 ins->dreg = alloc_dreg (cfg, ins->type);
12604 type_from_op (cfg, ins, arg1, arg2);
12606 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12608 * The backends expect the fceq opcodes to do the
12611 ins->sreg1 = cmp->sreg1;
12612 ins->sreg2 = cmp->sreg2;
12615 MONO_ADD_INS (cfg->cbb, ins);
12621 MonoInst *argconst;
12622 MonoMethod *cil_method;
12624 CHECK_STACK_OVF (1);
12626 n = read32 (ip + 2);
12627 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12628 if (!cmethod || mono_loader_get_last_error ())
12630 mono_class_init (cmethod->klass);
12632 mono_save_token_info (cfg, image, n, cmethod);
12634 context_used = mini_method_check_context_used (cfg, cmethod);
12636 cil_method = cmethod;
12637 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12638 METHOD_ACCESS_FAILURE (method, cil_method);
12640 if (mono_security_core_clr_enabled ())
12641 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12644 * Optimize the common case of ldftn+delegate creation
12646 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12647 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12648 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12649 MonoInst *target_ins, *handle_ins;
12650 MonoMethod *invoke;
12651 int invoke_context_used;
12653 invoke = mono_get_delegate_invoke (ctor_method->klass);
12654 if (!invoke || !mono_method_signature (invoke))
12657 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12659 target_ins = sp [-1];
12661 if (mono_security_core_clr_enabled ())
12662 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12664 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12665 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12666 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12668 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12672 /* FIXME: SGEN support */
12673 if (invoke_context_used == 0) {
12675 if (cfg->verbose_level > 3)
12676 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12677 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12680 CHECK_CFG_EXCEPTION;
12690 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12691 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12695 inline_costs += 10 * num_calls++;
12698 case CEE_LDVIRTFTN: {
12699 MonoInst *args [2];
12703 n = read32 (ip + 2);
12704 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12705 if (!cmethod || mono_loader_get_last_error ())
12707 mono_class_init (cmethod->klass);
12709 context_used = mini_method_check_context_used (cfg, cmethod);
12711 if (mono_security_core_clr_enabled ())
12712 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12715 * Optimize the common case of ldvirtftn+delegate creation
12717 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12718 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12719 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12720 MonoInst *target_ins, *handle_ins;
12721 MonoMethod *invoke;
12722 int invoke_context_used;
12723 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12725 invoke = mono_get_delegate_invoke (ctor_method->klass);
12726 if (!invoke || !mono_method_signature (invoke))
12729 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12731 target_ins = sp [-1];
12733 if (mono_security_core_clr_enabled ())
12734 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12736 /* FIXME: SGEN support */
12737 if (invoke_context_used == 0 || cfg->llvm_only) {
12739 if (cfg->verbose_level > 3)
12740 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12741 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12744 CHECK_CFG_EXCEPTION;
12757 args [1] = emit_get_rgctx_method (cfg, context_used,
12758 cmethod, MONO_RGCTX_INFO_METHOD);
12761 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12763 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12766 inline_costs += 10 * num_calls++;
12770 CHECK_STACK_OVF (1);
12772 n = read16 (ip + 2);
12774 EMIT_NEW_ARGLOAD (cfg, ins, n);
12779 CHECK_STACK_OVF (1);
12781 n = read16 (ip + 2);
12783 NEW_ARGLOADA (cfg, ins, n);
12784 MONO_ADD_INS (cfg->cbb, ins);
12792 n = read16 (ip + 2);
12794 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12796 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12800 CHECK_STACK_OVF (1);
12802 n = read16 (ip + 2);
12804 EMIT_NEW_LOCLOAD (cfg, ins, n);
12809 unsigned char *tmp_ip;
12810 CHECK_STACK_OVF (1);
12812 n = read16 (ip + 2);
12815 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12821 EMIT_NEW_LOCLOADA (cfg, ins, n);
12830 n = read16 (ip + 2);
12832 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12834 emit_stloc_ir (cfg, sp, header, n);
12841 if (sp != stack_start)
12843 if (cfg->method != method)
12845 * Inlining this into a loop in a parent could lead to
12846 * stack overflows which is different behavior than the
12847 * non-inlined case, thus disable inlining in this case.
12849 INLINE_FAILURE("localloc");
12851 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12852 ins->dreg = alloc_preg (cfg);
12853 ins->sreg1 = sp [0]->dreg;
12854 ins->type = STACK_PTR;
12855 MONO_ADD_INS (cfg->cbb, ins);
12857 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12859 ins->flags |= MONO_INST_INIT;
12864 case CEE_ENDFILTER: {
12865 MonoExceptionClause *clause, *nearest;
12870 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12872 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12873 ins->sreg1 = (*sp)->dreg;
12874 MONO_ADD_INS (cfg->cbb, ins);
12875 start_new_bblock = 1;
12879 for (cc = 0; cc < header->num_clauses; ++cc) {
12880 clause = &header->clauses [cc];
12881 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12882 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12883 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12886 g_assert (nearest);
12887 if ((ip - header->code) != nearest->handler_offset)
12892 case CEE_UNALIGNED_:
12893 ins_flag |= MONO_INST_UNALIGNED;
12894 /* FIXME: record alignment? we can assume 1 for now */
12898 case CEE_VOLATILE_:
12899 ins_flag |= MONO_INST_VOLATILE;
12903 ins_flag |= MONO_INST_TAILCALL;
12904 cfg->flags |= MONO_CFG_HAS_TAIL;
12905 /* Can't inline tail calls at this time */
12906 inline_costs += 100000;
12913 token = read32 (ip + 2);
12914 klass = mini_get_class (method, token, generic_context);
12915 CHECK_TYPELOAD (klass);
12916 if (generic_class_is_reference_type (cfg, klass))
12917 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12919 mini_emit_initobj (cfg, *sp, NULL, klass);
12923 case CEE_CONSTRAINED_:
12925 token = read32 (ip + 2);
12926 constrained_class = mini_get_class (method, token, generic_context);
12927 CHECK_TYPELOAD (constrained_class);
12931 case CEE_INITBLK: {
12932 MonoInst *iargs [3];
12936 /* Skip optimized paths for volatile operations. */
12937 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12938 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12939 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12940 /* emit_memset only works when val == 0 */
12941 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12944 iargs [0] = sp [0];
12945 iargs [1] = sp [1];
12946 iargs [2] = sp [2];
12947 if (ip [1] == CEE_CPBLK) {
12949 * FIXME: It's unclear whether we should be emitting both the acquire
12950 * and release barriers for cpblk. It is technically both a load and
12951 * store operation, so it seems like that's the sensible thing to do.
12953 * FIXME: We emit full barriers on both sides of the operation for
12954 * simplicity. We should have a separate atomic memcpy method instead.
12956 MonoMethod *memcpy_method = get_memcpy_method ();
12958 if (ins_flag & MONO_INST_VOLATILE)
12959 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12961 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12962 call->flags |= ins_flag;
12964 if (ins_flag & MONO_INST_VOLATILE)
12965 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12967 MonoMethod *memset_method = get_memset_method ();
12968 if (ins_flag & MONO_INST_VOLATILE) {
12969 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12970 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12972 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12973 call->flags |= ins_flag;
12984 ins_flag |= MONO_INST_NOTYPECHECK;
12986 ins_flag |= MONO_INST_NORANGECHECK;
12987 /* we ignore the no-nullcheck for now since we
12988 * really do it explicitly only when doing callvirt->call
12992 case CEE_RETHROW: {
12994 int handler_offset = -1;
12996 for (i = 0; i < header->num_clauses; ++i) {
12997 MonoExceptionClause *clause = &header->clauses [i];
12998 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12999 handler_offset = clause->handler_offset;
13004 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13006 if (handler_offset == -1)
13009 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13010 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13011 ins->sreg1 = load->dreg;
13012 MONO_ADD_INS (cfg->cbb, ins);
13014 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13015 MONO_ADD_INS (cfg->cbb, ins);
13018 link_bblock (cfg, cfg->cbb, end_bblock);
13019 start_new_bblock = 1;
13027 CHECK_STACK_OVF (1);
13029 token = read32 (ip + 2);
13030 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13031 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13034 val = mono_type_size (type, &ialign);
13036 MonoClass *klass = mini_get_class (method, token, generic_context);
13037 CHECK_TYPELOAD (klass);
13039 val = mono_type_size (&klass->byval_arg, &ialign);
13041 if (mini_is_gsharedvt_klass (klass))
13042 GSHAREDVT_FAILURE (*ip);
13044 EMIT_NEW_ICONST (cfg, ins, val);
13049 case CEE_REFANYTYPE: {
13050 MonoInst *src_var, *src;
13052 GSHAREDVT_FAILURE (*ip);
13058 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13060 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13061 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13067 case CEE_READONLY_:
13080 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13090 g_warning ("opcode 0x%02x not handled", *ip);
13094 if (start_new_bblock != 1)
13097 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13098 if (cfg->cbb->next_bb) {
13099 /* This could already be set because of inlining, #693905 */
13100 MonoBasicBlock *bb = cfg->cbb;
13102 while (bb->next_bb)
13104 bb->next_bb = end_bblock;
13106 cfg->cbb->next_bb = end_bblock;
13109 if (cfg->method == method && cfg->domainvar) {
13111 MonoInst *get_domain;
13113 cfg->cbb = init_localsbb;
13115 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13116 MONO_ADD_INS (cfg->cbb, get_domain);
13118 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13120 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13121 MONO_ADD_INS (cfg->cbb, store);
13124 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13125 if (cfg->compile_aot)
13126 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13127 mono_get_got_var (cfg);
13130 if (cfg->method == method && cfg->got_var)
13131 mono_emit_load_got_addr (cfg);
13133 if (init_localsbb) {
13134 cfg->cbb = init_localsbb;
13136 for (i = 0; i < header->num_locals; ++i) {
13137 emit_init_local (cfg, i, header->locals [i], init_locals);
13141 if (cfg->init_ref_vars && cfg->method == method) {
13142 /* Emit initialization for ref vars */
13143 // FIXME: Avoid duplication initialization for IL locals.
13144 for (i = 0; i < cfg->num_varinfo; ++i) {
13145 MonoInst *ins = cfg->varinfo [i];
13147 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13148 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13152 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13153 cfg->cbb = init_localsbb;
13154 emit_push_lmf (cfg);
13157 cfg->cbb = init_localsbb;
13158 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13161 MonoBasicBlock *bb;
13164 * Make seq points at backward branch targets interruptable.
13166 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13167 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13168 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13171 /* Add a sequence point for method entry/exit events */
13172 if (seq_points && cfg->gen_sdb_seq_points) {
13173 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13174 MONO_ADD_INS (init_localsbb, ins);
13175 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13176 MONO_ADD_INS (cfg->bb_exit, ins);
13180 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13181 * the code they refer to was dead (#11880).
13183 if (sym_seq_points) {
13184 for (i = 0; i < header->code_size; ++i) {
13185 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13188 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13189 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13196 if (cfg->method == method) {
13197 MonoBasicBlock *bb;
13198 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13199 bb->region = mono_find_block_region (cfg, bb->real_offset);
13201 mono_create_spvar_for_region (cfg, bb->region);
13202 if (cfg->verbose_level > 2)
13203 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13207 if (inline_costs < 0) {
13210 /* Method is too large */
13211 mname = mono_method_full_name (method, TRUE);
13212 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13213 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13217 if ((cfg->verbose_level > 2) && (cfg->method == method))
13218 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13223 g_assert (!mono_error_ok (&cfg->error));
13227 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13231 set_exception_type_from_invalid_il (cfg, method, ip);
13235 g_slist_free (class_inits);
13236 mono_basic_block_free (original_bb);
13237 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13238 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13239 if (cfg->exception_type)
13242 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store opcode (OP_STORE*_MEMBASE_REG) to its
 * immediate-source counterpart (OP_STORE*_MEMBASE_IMM), preserving the
 * store width. Used when the stored value is known to be a constant so
 * the store can take the constant directly.
 *   Aborts (g_assert_not_reached) for opcodes without an immediate form.
 * NOTE(review): the switch header and closing lines are elided in this
 * dump; the mapping below is read from the surviving case/return pairs.
 */
13246 store_membase_reg_to_store_membase_imm (int opcode)
13249 case OP_STORE_MEMBASE_REG:
13250 return OP_STORE_MEMBASE_IMM;
13251 case OP_STOREI1_MEMBASE_REG:
13252 return OP_STOREI1_MEMBASE_IMM;
13253 case OP_STOREI2_MEMBASE_REG:
13254 return OP_STOREI2_MEMBASE_IMM;
13255 case OP_STOREI4_MEMBASE_REG:
13256 return OP_STOREI4_MEMBASE_IMM;
13257 case OP_STOREI8_MEMBASE_REG:
13258 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode is a caller bug. */
13260 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand (reg, imm) variant of a two-register
 * opcode, so a constant sreg2 can be folded into the instruction.
 * NOTE(review): the case labels for the ALU section are elided in this
 * dump; the groupings below are inferred from the returned *_IMM names
 * (OP_I* = 32-bit int forms, OP_L* = 64-bit long forms) — confirm
 * against the full source. The fall-through/default return is also not
 * visible here.
 */
13267 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU / shift ops. */
13271 return OP_IADD_IMM;
13273 return OP_ISUB_IMM;
13275 return OP_IDIV_IMM;
13277 return OP_IDIV_UN_IMM;
13279 return OP_IREM_IMM;
13281 return OP_IREM_UN_IMM;
13283 return OP_IMUL_IMM;
13285 return OP_IAND_IMM;
13289 return OP_IXOR_IMM;
13291 return OP_ISHL_IMM;
13293 return OP_ISHR_IMM;
13295 return OP_ISHR_UN_IMM;
/* 64-bit long ALU / shift ops. */
13298 return OP_LADD_IMM;
13300 return OP_LSUB_IMM;
13302 return OP_LAND_IMM;
13306 return OP_LXOR_IMM;
13308 return OP_LSHL_IMM;
13310 return OP_LSHR_IMM;
13312 return OP_LSHR_UN_IMM;
/* Long remainder only has an imm form on 64-bit registers. */
13313 #if SIZEOF_REGISTER == 8
13315 return OP_LREM_IMM;
/* Compares. */
13319 return OP_COMPARE_IMM;
13321 return OP_ICOMPARE_IMM;
13323 return OP_LCOMPARE_IMM;
/* Stores: fold a constant source into the store itself. */
13325 case OP_STORE_MEMBASE_REG:
13326 return OP_STORE_MEMBASE_IMM;
13327 case OP_STOREI1_MEMBASE_REG:
13328 return OP_STOREI1_MEMBASE_IMM;
13329 case OP_STOREI2_MEMBASE_REG:
13330 return OP_STOREI2_MEMBASE_IMM;
13331 case OP_STOREI4_MEMBASE_REG:
13332 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate encodings. */
13334 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13336 return OP_X86_PUSH_IMM;
13337 case OP_X86_COMPARE_MEMBASE_REG:
13338 return OP_X86_COMPARE_MEMBASE_IMM;
13340 #if defined(TARGET_AMD64)
13341 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13342 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant register target becomes a direct call. */
13344 case OP_VOIDCALL_REG:
13345 return OP_VOIDCALL;
13353 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * Mono IR load-from-memory-base opcode (OP_LOAD*_MEMBASE), choosing the
 * load width/signedness. Object references use the pointer-sized
 * OP_LOAD_MEMBASE (see the explicit CEE_LDIND_REF case).
 *   Aborts for any opcode that is not an ldind.
 * NOTE(review): most case labels are elided in this dump; mappings are
 * read from the surviving return lines and the one visible case label.
 */
13360 ldind_to_load_membase (int opcode)
13364 return OP_LOADI1_MEMBASE;
13366 return OP_LOADU1_MEMBASE;
13368 return OP_LOADI2_MEMBASE;
13370 return OP_LOADU2_MEMBASE;
13372 return OP_LOADI4_MEMBASE;
13374 return OP_LOADU4_MEMBASE;
13376 return OP_LOAD_MEMBASE;
13377 case CEE_LDIND_REF:
13378 return OP_LOAD_MEMBASE;
13380 return OP_LOADI8_MEMBASE;
13382 return OP_LOADR4_MEMBASE;
13384 return OP_LOADR8_MEMBASE;
13386 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * Mono IR store-to-memory-base opcode (OP_STORE*_MEMBASE_REG), choosing
 * the store width. Object references use the pointer-sized
 * OP_STORE_MEMBASE_REG (see the explicit CEE_STIND_REF case).
 *   Aborts for any opcode that is not a stind.
 * NOTE(review): most case labels are elided in this dump; mappings are
 * read from the surviving return lines and the one visible case label.
 */
13393 stind_to_store_membase (int opcode)
13397 return OP_STOREI1_MEMBASE_REG;
13399 return OP_STOREI2_MEMBASE_REG;
13401 return OP_STOREI4_MEMBASE_REG;
13403 case CEE_STIND_REF:
13404 return OP_STORE_MEMBASE_REG;
13406 return OP_STOREI8_MEMBASE_REG;
13408 return OP_STORER4_MEMBASE_REG;
13410 return OP_STORER8_MEMBASE_REG;
13412 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to an absolute-address
 * load (OP_LOAD*_MEM), used when the base register holds a known
 * constant address. Only x86/amd64 provide these absolute-address
 * forms, hence the TARGET guard; 8-byte loads additionally require
 * 64-bit registers.
 * NOTE(review): the switch header and the non-x86 fallback return are
 * elided in this dump.
 */
13419 mono_load_membase_to_load_mem (int opcode)
13421 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13422 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13424 case OP_LOAD_MEMBASE:
13425 return OP_LOAD_MEM;
13426 case OP_LOADU1_MEMBASE:
13427 return OP_LOADU1_MEM;
13428 case OP_LOADU2_MEMBASE:
13429 return OP_LOADU2_MEM;
13430 case OP_LOADI4_MEMBASE:
13431 return OP_LOADI4_MEM;
13432 case OP_LOADU4_MEMBASE:
13433 return OP_LOADU4_MEM;
13434 #if SIZEOF_REGISTER == 8
13435 case OP_LOADI8_MEMBASE:
13436 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds a following store
 * (store_opcode), return an x86/amd64 read-modify-write opcode that
 * performs the operation directly on the memory destination, so the
 * separate store can be removed.
 *   The store width must match the register width: on x86 only
 * pointer/4-byte stores qualify; on amd64 8-byte stores are accepted
 * too, and OP_AMD64_* forms handle the 64-bit operations while OP_X86_*
 * forms handle the 32-bit ones.
 * NOTE(review): the ALU case labels and the "no match" fallback are
 * elided in this dump; groupings are inferred from the returned opcode
 * names — confirm against the full source.
 */
13445 op_to_op_dest_membase (int store_opcode, int opcode)
13447 #if defined(TARGET_X86)
13448 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* 32-bit reg-source RMW forms. */
13453 return OP_X86_ADD_MEMBASE_REG;
13455 return OP_X86_SUB_MEMBASE_REG;
13457 return OP_X86_AND_MEMBASE_REG;
13459 return OP_X86_OR_MEMBASE_REG;
13461 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-source RMW forms. */
13464 return OP_X86_ADD_MEMBASE_IMM;
13467 return OP_X86_SUB_MEMBASE_IMM;
13470 return OP_X86_AND_MEMBASE_IMM;
13473 return OP_X86_OR_MEMBASE_IMM;
13476 return OP_X86_XOR_MEMBASE_IMM;
13482 #if defined(TARGET_AMD64)
13483 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit forms (OP_X86_*). */
13488 return OP_X86_ADD_MEMBASE_REG;
13490 return OP_X86_SUB_MEMBASE_REG;
13492 return OP_X86_AND_MEMBASE_REG;
13494 return OP_X86_OR_MEMBASE_REG;
13496 return OP_X86_XOR_MEMBASE_REG;
13498 return OP_X86_ADD_MEMBASE_IMM;
13500 return OP_X86_SUB_MEMBASE_IMM;
13502 return OP_X86_AND_MEMBASE_IMM;
13504 return OP_X86_OR_MEMBASE_IMM;
13506 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit forms (OP_AMD64_*). */
13508 return OP_AMD64_ADD_MEMBASE_REG;
13510 return OP_AMD64_SUB_MEMBASE_REG;
13512 return OP_AMD64_AND_MEMBASE_REG;
13514 return OP_AMD64_OR_MEMBASE_REG;
13516 return OP_AMD64_XOR_MEMBASE_REG;
13519 return OP_AMD64_ADD_MEMBASE_IMM;
13522 return OP_AMD64_SUB_MEMBASE_IMM;
13525 return OP_AMD64_AND_MEMBASE_IMM;
13528 return OP_AMD64_OR_MEMBASE_IMM;
13531 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1-byte store into a
 * single x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE). Only a 1-byte store (OP_STOREI1_MEMBASE_REG)
 * qualifies, since SETcc writes exactly one byte.
 * NOTE(review): the case labels selecting between the EQ and NE arms,
 * and the fallback return, are elided in this dump — confirm the
 * triggering opcodes against the full source.
 */
13541 op_to_op_store_membase (int store_opcode, int opcode)
13543 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13546 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13547 return OP_X86_SETEQ_MEMBASE;
13549 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13550 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given a load (load_opcode) that feeds the FIRST source register of
 * opcode, return an x86/amd64 opcode that reads that operand straight
 * from memory, so the separate load can be removed. Returns through
 * width-specific variants: OP_AMD64_ICOMPARE_* for 32-bit operands,
 * OP_AMD64_COMPARE_* for 64-bit; the ilp32 checks pick the right width
 * for pointer-sized OP_LOAD_MEMBASE under the x32 ABI.
 * NOTE(review): several case labels and the fallback return are elided
 * in this dump; also note the in-source FIXMEs about sign extension and
 * 32-bit-immediate-only forms — they are pre-existing known issues, not
 * introduced here.
 */
13558 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13561 /* FIXME: This has sign extension issues */
/* Unsigned byte loads can use the dedicated 8-bit compare form. */
13563 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13564 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/4-byte loads can be folded. */
13567 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13572 return OP_X86_PUSH_MEMBASE;
13573 case OP_COMPARE_IMM:
13574 case OP_ICOMPARE_IMM:
13575 return OP_X86_COMPARE_MEMBASE_IMM;
13578 return OP_X86_COMPARE_MEMBASE_REG;
13582 #ifdef TARGET_AMD64
13583 /* FIXME: This has sign extension issues */
13585 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13586 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Push requires a full 8-byte (or LP64 pointer) load. */
13591 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13592 return OP_X86_PUSH_MEMBASE;
13594 /* FIXME: This only works for 32 bit immediates
13595 case OP_COMPARE_IMM:
13596 case OP_LCOMPARE_IMM:
13597 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13598 return OP_AMD64_COMPARE_MEMBASE_IMM;
13600 case OP_ICOMPARE_IMM:
13601 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13602 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32- vs 64-bit form by load width / ABI. */
13606 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13607 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13608 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13609 return OP_AMD64_COMPARE_MEMBASE_REG;
13612 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13613 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given a load (load_opcode) that feeds the SECOND source register of
 * opcode, return an x86/amd64 reg-op-memory opcode (…_REG_MEMBASE) that
 * reads that operand straight from memory, so the separate load can be
 * removed. On amd64 the branch on load width (and on ilp32 for
 * pointer-sized loads) selects 32-bit forms (OP_X86_* / AMD64_ICOMPARE)
 * versus 64-bit forms (OP_AMD64_*).
 * NOTE(review): the ALU case labels and the fallback return are elided
 * in this dump; groupings are inferred from the returned opcode names.
 */
13622 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer/4-byte loads can be folded. */
13625 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13631 return OP_X86_COMPARE_REG_MEMBASE;
13633 return OP_X86_ADD_REG_MEMBASE;
13635 return OP_X86_SUB_REG_MEMBASE;
13637 return OP_X86_AND_REG_MEMBASE;
13639 return OP_X86_OR_REG_MEMBASE;
13641 return OP_X86_XOR_REG_MEMBASE;
13645 #ifdef TARGET_AMD64
/* 32-bit operand (including x32-ABI pointer loads). */
13646 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13649 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13651 return OP_X86_ADD_REG_MEMBASE;
13653 return OP_X86_SUB_REG_MEMBASE;
13655 return OP_X86_AND_REG_MEMBASE;
13657 return OP_X86_OR_REG_MEMBASE;
13659 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operand (including LP64 pointer loads). */
13661 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13665 return OP_AMD64_COMPARE_REG_MEMBASE;
13667 return OP_AMD64_ADD_REG_MEMBASE;
13669 return OP_AMD64_SUB_REG_MEMBASE;
13671 return OP_AMD64_AND_REG_MEMBASE;
13673 return OP_AMD64_OR_REG_MEMBASE;
13675 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes that the
 * current architecture emulates in software (per the MONO_ARCH_EMULATE_*
 * / SIZEOF_REGISTER guards below), for which no immediate variant should
 * be produced. Anything not filtered is delegated to mono_op_to_op_imm.
 * NOTE(review): the case labels and early returns inside each #if block
 * are elided in this dump — confirm the exact filtered opcodes (long
 * shifts, mul/div/rem) against the full source.
 */
13684 mono_op_to_op_imm_noemul (int opcode)
13687 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13693 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13700 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13705 return mono_op_to_op_imm (opcode);
13710 * mono_handle_global_vregs:
13712 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13716 mono_handle_global_vregs (MonoCompile *cfg)
13718 gint32 *vreg_to_bb;
13719 MonoBasicBlock *bb;
13722 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13724 #ifdef MONO_ARCH_SIMD_INTRINSICS
13725 if (cfg->uses_simd_intrinsics)
13726 mono_simd_simplify_indirection (cfg);
13729 /* Find local vregs used in more than one bb */
13730 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13731 MonoInst *ins = bb->code;
13732 int block_num = bb->block_num;
13734 if (cfg->verbose_level > 2)
13735 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13738 for (; ins; ins = ins->next) {
13739 const char *spec = INS_INFO (ins->opcode);
13740 int regtype = 0, regindex;
13743 if (G_UNLIKELY (cfg->verbose_level > 2))
13744 mono_print_ins (ins);
13746 g_assert (ins->opcode >= MONO_CEE_LAST);
13748 for (regindex = 0; regindex < 4; regindex ++) {
13751 if (regindex == 0) {
13752 regtype = spec [MONO_INST_DEST];
13753 if (regtype == ' ')
13756 } else if (regindex == 1) {
13757 regtype = spec [MONO_INST_SRC1];
13758 if (regtype == ' ')
13761 } else if (regindex == 2) {
13762 regtype = spec [MONO_INST_SRC2];
13763 if (regtype == ' ')
13766 } else if (regindex == 3) {
13767 regtype = spec [MONO_INST_SRC3];
13768 if (regtype == ' ')
13773 #if SIZEOF_REGISTER == 4
13774 /* In the LLVM case, the long opcodes are not decomposed */
13775 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13777 * Since some instructions reference the original long vreg,
13778 * and some reference the two component vregs, it is quite hard
13779 * to determine when it needs to be global. So be conservative.
13781 if (!get_vreg_to_inst (cfg, vreg)) {
13782 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13784 if (cfg->verbose_level > 2)
13785 printf ("LONG VREG R%d made global.\n", vreg);
13789 * Make the component vregs volatile since the optimizations can
13790 * get confused otherwise.
13792 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13793 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13797 g_assert (vreg != -1);
13799 prev_bb = vreg_to_bb [vreg];
13800 if (prev_bb == 0) {
13801 /* 0 is a valid block num */
13802 vreg_to_bb [vreg] = block_num + 1;
13803 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13804 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13807 if (!get_vreg_to_inst (cfg, vreg)) {
13808 if (G_UNLIKELY (cfg->verbose_level > 2))
13809 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13813 if (vreg_is_ref (cfg, vreg))
13814 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13816 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13819 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13822 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13825 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13828 g_assert_not_reached ();
13832 /* Flag as having been used in more than one bb */
13833 vreg_to_bb [vreg] = -1;
13839 /* If a variable is used in only one bblock, convert it into a local vreg */
13840 for (i = 0; i < cfg->num_varinfo; i++) {
13841 MonoInst *var = cfg->varinfo [i];
13842 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13844 switch (var->type) {
13850 #if SIZEOF_REGISTER == 8
13853 #if !defined(TARGET_X86)
13854 /* Enabling this screws up the fp stack on x86 */
13857 if (mono_arch_is_soft_float ())
13860 /* Arguments are implicitly global */
13861 /* Putting R4 vars into registers doesn't work currently */
13862 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13863 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13865 * Make sure that the variable's liveness interval doesn't contain a call, since
13866 * that would cause the lvreg to be spilled, making the whole optimization
13869 /* This is too slow for JIT compilation */
13871 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13873 int def_index, call_index, ins_index;
13874 gboolean spilled = FALSE;
13879 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13880 const char *spec = INS_INFO (ins->opcode);
13882 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13883 def_index = ins_index;
13885 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13886 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13887 if (call_index > def_index) {
13893 if (MONO_IS_CALL (ins))
13894 call_index = ins_index;
13904 if (G_UNLIKELY (cfg->verbose_level > 2))
13905 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13906 var->flags |= MONO_INST_IS_DEAD;
13907 cfg->vreg_to_inst [var->dreg] = NULL;
13914 * Compress the varinfo and vars tables so the liveness computation is faster and
13915 * takes up less space.
13918 for (i = 0; i < cfg->num_varinfo; ++i) {
13919 MonoInst *var = cfg->varinfo [i];
13920 if (pos < i && cfg->locals_start == i)
13921 cfg->locals_start = pos;
13922 if (!(var->flags & MONO_INST_IS_DEAD)) {
13924 cfg->varinfo [pos] = cfg->varinfo [i];
13925 cfg->varinfo [pos]->inst_c0 = pos;
13926 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13927 cfg->vars [pos].idx = pos;
13928 #if SIZEOF_REGISTER == 4
13929 if (cfg->varinfo [pos]->type == STACK_I8) {
13930 /* Modify the two component vars too */
13933 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13934 var1->inst_c0 = pos;
13935 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13936 var1->inst_c0 = pos;
13943 cfg->num_varinfo = pos;
13944 if (cfg->locals_start > cfg->num_varinfo)
13945 cfg->locals_start = cfg->num_varinfo;
13949 * mono_spill_global_vars:
13951 * Generate spill code for variables which are not allocated to registers,
13952 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13953 * code is generated which could be optimized by the local optimization passes.
13956 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13958 MonoBasicBlock *bb;
13960 int orig_next_vreg;
13961 guint32 *vreg_to_lvreg;
13963 guint32 i, lvregs_len;
13964 gboolean dest_has_lvreg = FALSE;
13965 guint32 stacktypes [128];
13966 MonoInst **live_range_start, **live_range_end;
13967 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13968 int *gsharedvt_vreg_to_idx = NULL;
13970 *need_local_opts = FALSE;
13972 memset (spec2, 0, sizeof (spec2));
13974 /* FIXME: Move this function to mini.c */
13975 stacktypes ['i'] = STACK_PTR;
13976 stacktypes ['l'] = STACK_I8;
13977 stacktypes ['f'] = STACK_R8;
13978 #ifdef MONO_ARCH_SIMD_INTRINSICS
13979 stacktypes ['x'] = STACK_VTYPE;
13982 #if SIZEOF_REGISTER == 4
13983 /* Create MonoInsts for longs */
13984 for (i = 0; i < cfg->num_varinfo; i++) {
13985 MonoInst *ins = cfg->varinfo [i];
13987 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13988 switch (ins->type) {
13993 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13996 g_assert (ins->opcode == OP_REGOFFSET);
13998 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
14000 tree->opcode = OP_REGOFFSET;
14001 tree->inst_basereg = ins->inst_basereg;
14002 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14004 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
14006 tree->opcode = OP_REGOFFSET;
14007 tree->inst_basereg = ins->inst_basereg;
14008 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14018 if (cfg->compute_gc_maps) {
14019 /* registers need liveness info even for non-reference variables */
14020 for (i = 0; i < cfg->num_varinfo; i++) {
14021 MonoInst *ins = cfg->varinfo [i];
14023 if (ins->opcode == OP_REGVAR)
14024 ins->flags |= MONO_INST_GC_TRACK;
14028 if (cfg->gsharedvt) {
14029 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14031 for (i = 0; i < cfg->num_varinfo; ++i) {
14032 MonoInst *ins = cfg->varinfo [i];
14035 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14036 if (i >= cfg->locals_start) {
14038 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14039 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14040 ins->opcode = OP_GSHAREDVT_LOCAL;
14041 ins->inst_imm = idx;
14044 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14045 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14051 /* FIXME: widening and truncation */
14054 * As an optimization, when a variable allocated to the stack is first loaded into
14055 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14056 * the variable again.
14058 orig_next_vreg = cfg->next_vreg;
14059 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14060 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14064 * These arrays contain the first and last instructions accessing a given
14066 * Since we emit bblocks in the same order we process them here, and we
14067 * don't split live ranges, these will precisely describe the live range of
14068 * the variable, i.e. the instruction range where a valid value can be found
14069 * in the variables location.
14070 * The live range is computed using the liveness info computed by the liveness pass.
14071 * We can't use vmv->range, since that is an abstract live range, and we need
14072 * one which is instruction precise.
14073 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14075 /* FIXME: Only do this if debugging info is requested */
14076 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14077 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14078 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14079 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14081 /* Add spill loads/stores */
14082 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14085 if (cfg->verbose_level > 2)
14086 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14088 /* Clear vreg_to_lvreg array */
14089 for (i = 0; i < lvregs_len; i++)
14090 vreg_to_lvreg [lvregs [i]] = 0;
14094 MONO_BB_FOR_EACH_INS (bb, ins) {
14095 const char *spec = INS_INFO (ins->opcode);
14096 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14097 gboolean store, no_lvreg;
14098 int sregs [MONO_MAX_SRC_REGS];
14100 if (G_UNLIKELY (cfg->verbose_level > 2))
14101 mono_print_ins (ins);
14103 if (ins->opcode == OP_NOP)
14107 * We handle LDADDR here as well, since it can only be decomposed
14108 * when variable addresses are known.
14110 if (ins->opcode == OP_LDADDR) {
14111 MonoInst *var = ins->inst_p0;
14113 if (var->opcode == OP_VTARG_ADDR) {
14114 /* Happens on SPARC/S390 where vtypes are passed by reference */
14115 MonoInst *vtaddr = var->inst_left;
14116 if (vtaddr->opcode == OP_REGVAR) {
14117 ins->opcode = OP_MOVE;
14118 ins->sreg1 = vtaddr->dreg;
14120 else if (var->inst_left->opcode == OP_REGOFFSET) {
14121 ins->opcode = OP_LOAD_MEMBASE;
14122 ins->inst_basereg = vtaddr->inst_basereg;
14123 ins->inst_offset = vtaddr->inst_offset;
14126 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14127 /* gsharedvt arg passed by ref */
14128 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14130 ins->opcode = OP_LOAD_MEMBASE;
14131 ins->inst_basereg = var->inst_basereg;
14132 ins->inst_offset = var->inst_offset;
14133 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14134 MonoInst *load, *load2, *load3;
14135 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14136 int reg1, reg2, reg3;
14137 MonoInst *info_var = cfg->gsharedvt_info_var;
14138 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14142 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14145 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14147 g_assert (info_var);
14148 g_assert (locals_var);
14150 /* Mark the instruction used to compute the locals var as used */
14151 cfg->gsharedvt_locals_var_ins = NULL;
14153 /* Load the offset */
14154 if (info_var->opcode == OP_REGOFFSET) {
14155 reg1 = alloc_ireg (cfg);
14156 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14157 } else if (info_var->opcode == OP_REGVAR) {
14159 reg1 = info_var->dreg;
14161 g_assert_not_reached ();
14163 reg2 = alloc_ireg (cfg);
14164 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14165 /* Load the locals area address */
14166 reg3 = alloc_ireg (cfg);
14167 if (locals_var->opcode == OP_REGOFFSET) {
14168 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14169 } else if (locals_var->opcode == OP_REGVAR) {
14170 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14172 g_assert_not_reached ();
14174 /* Compute the address */
14175 ins->opcode = OP_PADD;
14179 mono_bblock_insert_before_ins (bb, ins, load3);
14180 mono_bblock_insert_before_ins (bb, load3, load2);
14182 mono_bblock_insert_before_ins (bb, load2, load);
14184 g_assert (var->opcode == OP_REGOFFSET);
14186 ins->opcode = OP_ADD_IMM;
14187 ins->sreg1 = var->inst_basereg;
14188 ins->inst_imm = var->inst_offset;
14191 *need_local_opts = TRUE;
14192 spec = INS_INFO (ins->opcode);
14195 if (ins->opcode < MONO_CEE_LAST) {
14196 mono_print_ins (ins);
14197 g_assert_not_reached ();
14201 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14205 if (MONO_IS_STORE_MEMBASE (ins)) {
14206 tmp_reg = ins->dreg;
14207 ins->dreg = ins->sreg2;
14208 ins->sreg2 = tmp_reg;
14211 spec2 [MONO_INST_DEST] = ' ';
14212 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14213 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14214 spec2 [MONO_INST_SRC3] = ' ';
14216 } else if (MONO_IS_STORE_MEMINDEX (ins))
14217 g_assert_not_reached ();
14222 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14223 printf ("\t %.3s %d", spec, ins->dreg);
14224 num_sregs = mono_inst_get_src_registers (ins, sregs);
14225 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14226 printf (" %d", sregs [srcindex]);
14233 regtype = spec [MONO_INST_DEST];
14234 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14237 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14238 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14239 MonoInst *store_ins;
14241 MonoInst *def_ins = ins;
14242 int dreg = ins->dreg; /* The original vreg */
14244 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14246 if (var->opcode == OP_REGVAR) {
14247 ins->dreg = var->dreg;
14248 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14250 * Instead of emitting a load+store, use a _membase opcode.
14252 g_assert (var->opcode == OP_REGOFFSET);
14253 if (ins->opcode == OP_MOVE) {
14257 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14258 ins->inst_basereg = var->inst_basereg;
14259 ins->inst_offset = var->inst_offset;
14262 spec = INS_INFO (ins->opcode);
14266 g_assert (var->opcode == OP_REGOFFSET);
14268 prev_dreg = ins->dreg;
14270 /* Invalidate any previous lvreg for this vreg */
14271 vreg_to_lvreg [ins->dreg] = 0;
14275 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14277 store_opcode = OP_STOREI8_MEMBASE_REG;
14280 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14282 #if SIZEOF_REGISTER != 8
14283 if (regtype == 'l') {
14284 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14285 mono_bblock_insert_after_ins (bb, ins, store_ins);
14286 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14287 mono_bblock_insert_after_ins (bb, ins, store_ins);
14288 def_ins = store_ins;
14293 g_assert (store_opcode != OP_STOREV_MEMBASE);
14295 /* Try to fuse the store into the instruction itself */
14296 /* FIXME: Add more instructions */
14297 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14298 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14299 ins->inst_imm = ins->inst_c0;
14300 ins->inst_destbasereg = var->inst_basereg;
14301 ins->inst_offset = var->inst_offset;
14302 spec = INS_INFO (ins->opcode);
14303 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14304 ins->opcode = store_opcode;
14305 ins->inst_destbasereg = var->inst_basereg;
14306 ins->inst_offset = var->inst_offset;
14310 tmp_reg = ins->dreg;
14311 ins->dreg = ins->sreg2;
14312 ins->sreg2 = tmp_reg;
14315 spec2 [MONO_INST_DEST] = ' ';
14316 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14317 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14318 spec2 [MONO_INST_SRC3] = ' ';
14320 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14321 // FIXME: The backends expect the base reg to be in inst_basereg
14322 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14324 ins->inst_basereg = var->inst_basereg;
14325 ins->inst_offset = var->inst_offset;
14326 spec = INS_INFO (ins->opcode);
14328 /* printf ("INS: "); mono_print_ins (ins); */
14329 /* Create a store instruction */
14330 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14332 /* Insert it after the instruction */
14333 mono_bblock_insert_after_ins (bb, ins, store_ins);
14335 def_ins = store_ins;
14338 * We can't assign ins->dreg to var->dreg here, since the
14339 * sregs could use it. So set a flag, and do it after
14342 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14343 dest_has_lvreg = TRUE;
14348 if (def_ins && !live_range_start [dreg]) {
14349 live_range_start [dreg] = def_ins;
14350 live_range_start_bb [dreg] = bb;
14353 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14356 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14357 tmp->inst_c1 = dreg;
14358 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14365 num_sregs = mono_inst_get_src_registers (ins, sregs);
14366 for (srcindex = 0; srcindex < 3; ++srcindex) {
14367 regtype = spec [MONO_INST_SRC1 + srcindex];
14368 sreg = sregs [srcindex];
14370 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14371 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14372 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14373 MonoInst *use_ins = ins;
14374 MonoInst *load_ins;
14375 guint32 load_opcode;
14377 if (var->opcode == OP_REGVAR) {
14378 sregs [srcindex] = var->dreg;
14379 //mono_inst_set_src_registers (ins, sregs);
14380 live_range_end [sreg] = use_ins;
14381 live_range_end_bb [sreg] = bb;
14383 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14386 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14387 /* var->dreg is a hreg */
14388 tmp->inst_c1 = sreg;
14389 mono_bblock_insert_after_ins (bb, ins, tmp);
14395 g_assert (var->opcode == OP_REGOFFSET);
14397 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14399 g_assert (load_opcode != OP_LOADV_MEMBASE);
14401 if (vreg_to_lvreg [sreg]) {
14402 g_assert (vreg_to_lvreg [sreg] != -1);
14404 /* The variable is already loaded to an lvreg */
14405 if (G_UNLIKELY (cfg->verbose_level > 2))
14406 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14407 sregs [srcindex] = vreg_to_lvreg [sreg];
14408 //mono_inst_set_src_registers (ins, sregs);
14412 /* Try to fuse the load into the instruction */
14413 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14414 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14415 sregs [0] = var->inst_basereg;
14416 //mono_inst_set_src_registers (ins, sregs);
14417 ins->inst_offset = var->inst_offset;
14418 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14419 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14420 sregs [1] = var->inst_basereg;
14421 //mono_inst_set_src_registers (ins, sregs);
14422 ins->inst_offset = var->inst_offset;
14424 if (MONO_IS_REAL_MOVE (ins)) {
14425 ins->opcode = OP_NOP;
14428 //printf ("%d ", srcindex); mono_print_ins (ins);
14430 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14432 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14433 if (var->dreg == prev_dreg) {
14435 * sreg refers to the value loaded by the load
14436 * emitted below, but we need to use ins->dreg
14437 * since it refers to the store emitted earlier.
14441 g_assert (sreg != -1);
14442 vreg_to_lvreg [var->dreg] = sreg;
14443 g_assert (lvregs_len < 1024);
14444 lvregs [lvregs_len ++] = var->dreg;
14448 sregs [srcindex] = sreg;
14449 //mono_inst_set_src_registers (ins, sregs);
14451 #if SIZEOF_REGISTER != 8
14452 if (regtype == 'l') {
14453 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14454 mono_bblock_insert_before_ins (bb, ins, load_ins);
14455 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14456 mono_bblock_insert_before_ins (bb, ins, load_ins);
14457 use_ins = load_ins;
14462 #if SIZEOF_REGISTER == 4
14463 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14465 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14466 mono_bblock_insert_before_ins (bb, ins, load_ins);
14467 use_ins = load_ins;
14471 if (var->dreg < orig_next_vreg) {
14472 live_range_end [var->dreg] = use_ins;
14473 live_range_end_bb [var->dreg] = bb;
14476 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14479 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14480 tmp->inst_c1 = var->dreg;
14481 mono_bblock_insert_after_ins (bb, ins, tmp);
14485 mono_inst_set_src_registers (ins, sregs);
14487 if (dest_has_lvreg) {
14488 g_assert (ins->dreg != -1);
14489 vreg_to_lvreg [prev_dreg] = ins->dreg;
14490 g_assert (lvregs_len < 1024);
14491 lvregs [lvregs_len ++] = prev_dreg;
14492 dest_has_lvreg = FALSE;
14496 tmp_reg = ins->dreg;
14497 ins->dreg = ins->sreg2;
14498 ins->sreg2 = tmp_reg;
14501 if (MONO_IS_CALL (ins)) {
14502 /* Clear vreg_to_lvreg array */
14503 for (i = 0; i < lvregs_len; i++)
14504 vreg_to_lvreg [lvregs [i]] = 0;
14506 } else if (ins->opcode == OP_NOP) {
14508 MONO_INST_NULLIFY_SREGS (ins);
14511 if (cfg->verbose_level > 2)
14512 mono_print_ins_index (1, ins);
14515 /* Extend the live range based on the liveness info */
14516 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14517 for (i = 0; i < cfg->num_varinfo; i ++) {
14518 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14520 if (vreg_is_volatile (cfg, vi->vreg))
14521 /* The liveness info is incomplete */
14524 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14525 /* Live from at least the first ins of this bb */
14526 live_range_start [vi->vreg] = bb->code;
14527 live_range_start_bb [vi->vreg] = bb;
14530 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14531 /* Live at least until the last ins of this bb */
14532 live_range_end [vi->vreg] = bb->last_ins;
14533 live_range_end_bb [vi->vreg] = bb;
14540 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14541 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14543 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14544 for (i = 0; i < cfg->num_varinfo; ++i) {
14545 int vreg = MONO_VARINFO (cfg, i)->vreg;
14548 if (live_range_start [vreg]) {
14549 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14551 ins->inst_c1 = vreg;
14552 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14554 if (live_range_end [vreg]) {
14555 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14557 ins->inst_c1 = vreg;
14558 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14559 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14561 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14566 if (cfg->gsharedvt_locals_var_ins) {
14567 /* Nullify if unused */
14568 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14569 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14572 g_free (live_range_start);
14573 g_free (live_range_end);
14574 g_free (live_range_start_bb);
14575 g_free (live_range_end_bb);
14580 * - use 'iadd' instead of 'int_add'
14581 * - handling ovf opcodes: decompose in method_to_ir.
14582 * - unify iregs/fregs
14583 * -> partly done, the missing parts are:
14584 * - a more complete unification would involve unifying the hregs as well, so
14585 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14586 * would no longer map to the machine hregs, so the code generators would need to
14587 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14588 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14589 * fp/non-fp branches speeds it up by about 15%.
14590 * - use sext/zext opcodes instead of shifts
14592 * - get rid of TEMPLOADs if possible and use vregs instead
14593 * - clean up usage of OP_P/OP_ opcodes
14594 * - cleanup usage of DUMMY_USE
14595 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14597 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14598 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14599 * - make sure handle_stack_args () is called before the branch is emitted
14600 * - when the new IR is done, get rid of all unused stuff
14601 * - COMPARE/BEQ as separate instructions or unify them ?
14602 * - keeping them separate allows specialized compare instructions like
14603 * compare_imm, compare_membase
14604 * - most back ends unify fp compare+branch, fp compare+ceq
14605 * - integrate mono_save_args into inline_method
14606 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14607 * - handle long shift opts on 32 bit platforms somehow: they require
14608 * 3 sregs (2 for arg1 and 1 for arg2)
14609 * - make byref a 'normal' type.
14610 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14611 * variable if needed.
14612 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14613 * like inline_method.
14614 * - remove inlining restrictions
14615 * - fix LNEG and enable cfold of INEG
14616 * - generalize x86 optimizations like ldelema as a peephole optimization
14617 * - add store_mem_imm for amd64
14618 * - optimize the loading of the interruption flag in the managed->native wrappers
14619 * - avoid special handling of OP_NOP in passes
14620 * - move code inserting instructions into one function/macro.
14621 * - try a coalescing phase after liveness analysis
14622 * - add float -> vreg conversion + local optimizations on !x86
14623 * - figure out how to handle decomposed branches during optimizations, ie.
14624 * compare+branch, op_jump_table+op_br etc.
14625 * - promote RuntimeXHandles to vregs
14626 * - vtype cleanups:
14627 * - add a NEW_VARLOADA_VREG macro
14628 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14629 * accessing vtype fields.
14630 * - get rid of I8CONST on 64 bit platforms
14631 * - dealing with the increase in code size due to branches created during opcode
14633 * - use extended basic blocks
14634 * - all parts of the JIT
14635 * - handle_global_vregs () && local regalloc
14636 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14637 * - sources of increase in code size:
14640 * - isinst and castclass
14641 * - lvregs not allocated to global registers even if used multiple times
14642 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14644 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14645 * - add all micro optimizations from the old JIT
14646 * - put tree optimizations into the deadce pass
14647 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14648 * specific function.
14649 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14650 * fcompare + branchCC.
14651 * - create a helper function for allocating a stack slot, taking into account
14652 * MONO_CFG_HAS_SPILLUP.
14654 * - merge the ia64 switch changes.
14655 * - optimize mono_regstate2_alloc_int/float.
14656 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14657 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14658 * parts of the tree could be separated by other instructions, killing the tree
14659 * arguments, or stores killing loads etc. Also, should we fold loads into other
14660 * instructions if the result of the load is used multiple times ?
14661 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14662 * - LAST MERGE: 108395.
14663 * - when returning vtypes in registers, generate IR and append it to the end of the
14664 * last bb instead of doing it in the epilog.
14665 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14673 - When to decompose opcodes:
14674 - earlier: this makes some optimizations hard to implement, since the low level IR
14675 no longer contains the necessary information. But it is easier to do.
14676 - later: harder to implement, enables more optimizations.
14677 - Branches inside bblocks:
14678 - created when decomposing complex opcodes.
14679 - branches to another bblock: harmless, but not tracked by the branch
14680 optimizations, so need to branch to a label at the start of the bblock.
14681 - branches to inside the same bblock: very problematic, trips up the local
14682 reg allocator. Can be fixed by splitting the current bblock, but that is a
14683 complex operation, since some local vregs can become global vregs etc.
14684 - Local/global vregs:
14685 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14686 local register allocator.
14687 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14688 structure, created by mono_create_var (). Assigned to hregs or the stack by
14689 the global register allocator.
14690 - When to do optimizations like alu->alu_imm:
14691 - earlier -> saves work later on since the IR will be smaller/simpler
14692 - later -> can work on more instructions
14693 - Handling of valuetypes:
14694 - When a vtype is pushed on the stack, a new temporary is created, an
14695 instruction computing its address (LDADDR) is emitted and pushed on
14696 the stack. Need to optimize cases when the vtype is used immediately as in
14697 argument passing, stloc etc.
14698 - Instead of the to_end stuff in the old JIT, simply call the function handling
14699 the values on the stack before emitting the last instruction of the bb.
14702 #endif /* DISABLE_JIT */