2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whether 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 * Instruction metadata
169 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
170 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
176 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
181 /* keep in sync with the enum in mini.h */
184 #include "mini-ops.h"
189 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
190 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
192 * This should contain the index of the last sreg + 1. This is not the same
193 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
195 const gint8 ins_sreg_counts[] = {
196 #include "mini-ops.h"
201 #define MONO_INIT_VARINFO(vi,id) do { \
202 (vi)->range.first_use.pos.bid = 0xffff; \
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->gshared);
331 if (mini_type_var_is_vt (type))
334 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
363 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
366 static MONO_NEVER_INLINE void
367 break_on_unverified (void)
369 if (mini_get_debug_options ()->break_on_unverified)
373 static MONO_NEVER_INLINE void
374 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
376 char *method_fname = mono_method_full_name (method, TRUE);
377 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
379 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
380 g_free (method_fname);
381 g_free (cil_method_fname);
384 static MONO_NEVER_INLINE void
385 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
387 char *method_fname = mono_method_full_name (method, TRUE);
388 char *field_fname = mono_field_full_name (field);
389 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
390 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
391 g_free (method_fname);
392 g_free (field_fname);
395 static MONO_NEVER_INLINE void
396 inline_failure (MonoCompile *cfg, const char *msg)
398 if (cfg->verbose_level >= 2)
399 printf ("inline failed: %s\n", msg);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
403 static MONO_NEVER_INLINE void
404 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 if (cfg->verbose_level > 2) \
407 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
411 static MONO_NEVER_INLINE void
412 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
414 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
415 if (cfg->verbose_level >= 2)
416 printf ("%s\n", cfg->exception_message);
417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
421 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
422 * foo<T> (int i) { ldarg.0; box T; }
424 #define UNVERIFIED do { \
425 if (cfg->gsharedvt) { \
426 if (cfg->verbose_level > 2) \
427 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
429 goto exception_exit; \
431 break_on_unverified (); \
435 #define GET_BBLOCK(cfg,tblock,ip) do { \
436 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
438 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
439 NEW_BBLOCK (cfg, (tblock)); \
440 (tblock)->cil_code = (ip); \
441 ADD_BBLOCK (cfg, (tblock)); \
445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
446 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
447 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
448 (dest)->dreg = alloc_ireg_mp ((cfg)); \
449 (dest)->sreg1 = (sr1); \
450 (dest)->sreg2 = (sr2); \
451 (dest)->inst_imm = (imm); \
452 (dest)->backend.shift_amount = (shift); \
453 MONO_ADD_INS ((cfg)->cbb, (dest)); \
457 /* Emit conversions so both operands of a binary opcode are of the same type */
459 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
461 MonoInst *arg1 = *arg1_ref;
462 MonoInst *arg2 = *arg2_ref;
465 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
466 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
469 /* Mixing r4/r8 is allowed by the spec */
470 if (arg1->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
474 conv->type = STACK_R8;
478 if (arg2->type == STACK_R4) {
479 int dreg = alloc_freg (cfg);
481 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
482 conv->type = STACK_R8;
488 #if SIZEOF_REGISTER == 8
489 /* FIXME: Need to add many more cases */
490 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
493 int dr = alloc_preg (cfg);
494 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
495 (ins)->sreg2 = widen->dreg;
500 #define ADD_BINOP(op) do { \
501 MONO_INST_NEW (cfg, ins, (op)); \
503 ins->sreg1 = sp [0]->dreg; \
504 ins->sreg2 = sp [1]->dreg; \
505 type_from_op (cfg, ins, sp [0], sp [1]); \
507 /* Have to insert a widening op */ \
508 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
509 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
510 MONO_ADD_INS ((cfg)->cbb, (ins)); \
511 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
514 #define ADD_UNOP(op) do { \
515 MONO_INST_NEW (cfg, ins, (op)); \
517 ins->sreg1 = sp [0]->dreg; \
518 type_from_op (cfg, ins, sp [0], NULL); \
520 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
521 MONO_ADD_INS ((cfg)->cbb, (ins)); \
522 *sp++ = mono_decompose_opcode (cfg, ins); \
525 #define ADD_BINCOND(next_block) do { \
528 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
529 cmp->sreg1 = sp [0]->dreg; \
530 cmp->sreg2 = sp [1]->dreg; \
531 type_from_op (cfg, cmp, sp [0], sp [1]); \
533 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
534 type_from_op (cfg, ins, sp [0], sp [1]); \
535 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
536 GET_BBLOCK (cfg, tblock, target); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_true_bb = tblock; \
539 if ((next_block)) { \
540 link_bblock (cfg, cfg->cbb, (next_block)); \
541 ins->inst_false_bb = (next_block); \
542 start_new_bblock = 1; \
544 GET_BBLOCK (cfg, tblock, ip); \
545 link_bblock (cfg, cfg->cbb, tblock); \
546 ins->inst_false_bb = tblock; \
547 start_new_bblock = 2; \
549 if (sp != stack_start) { \
550 handle_stack_args (cfg, stack_start, sp - stack_start); \
551 CHECK_UNVERIFIABLE (cfg); \
553 MONO_ADD_INS (cfg->cbb, cmp); \
554 MONO_ADD_INS (cfg->cbb, ins); \
558 * link_bblock: Links two basic blocks
560 * links two basic blocks in the control flow graph, the 'from'
561 * argument is the starting block and the 'to' argument is the block
562 * that control flow transfers to after leaving 'from'.
565 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
567 MonoBasicBlock **newa;
571 if (from->cil_code) {
573 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
575 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
578 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
580 printf ("edge from entry to exit\n");
585 for (i = 0; i < from->out_count; ++i) {
586 if (to == from->out_bb [i]) {
592 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
593 for (i = 0; i < from->out_count; ++i) {
594 newa [i] = from->out_bb [i];
602 for (i = 0; i < to->in_count; ++i) {
603 if (from == to->in_bb [i]) {
609 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
610 for (i = 0; i < to->in_count; ++i) {
611 newa [i] = to->in_bb [i];
620 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
622 link_bblock (cfg, from, to);
626 * mono_find_block_region:
628 * We mark each basic block with a region ID. We use that to avoid BB
629 * optimizations when blocks are in different regions.
632 * A region token that encodes where this region is, and information
633 * about the clause owner for this block.
635 * The region encodes the try/catch/filter clause that owns this block
636 * as well as the type. -1 is a special value that represents a block
637 * that is in none of try/catch/filter.
640 mono_find_block_region (MonoCompile *cfg, int offset)
642 MonoMethodHeader *header = cfg->header;
643 MonoExceptionClause *clause;
646 for (i = 0; i < header->num_clauses; ++i) {
647 clause = &header->clauses [i];
648 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
649 (offset < (clause->handler_offset)))
650 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
652 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
653 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
654 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
655 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
656 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
658 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
661 for (i = 0; i < header->num_clauses; ++i) {
662 clause = &header->clauses [i];
664 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
665 return ((i + 1) << 8) | clause->flags;
672 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
674 MonoMethodHeader *header = cfg->header;
675 MonoExceptionClause *clause;
679 for (i = 0; i < header->num_clauses; ++i) {
680 clause = &header->clauses [i];
681 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
682 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
683 if (clause->flags == type)
684 res = g_list_append (res, clause);
691 mono_create_spvar_for_region (MonoCompile *cfg, int region)
695 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
699 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
700 /* prevent it from being register allocated */
701 var->flags |= MONO_INST_VOLATILE;
703 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
707 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
709 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
713 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
717 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
721 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
722 /* prevent it from being register allocated */
723 var->flags |= MONO_INST_VOLATILE;
725 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
731 * Returns the type used in the eval stack when @type is loaded.
732 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
735 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
739 type = mini_get_underlying_type (type);
740 inst->klass = klass = mono_class_from_mono_type (type);
742 inst->type = STACK_MP;
747 switch (type->type) {
749 inst->type = STACK_INV;
757 inst->type = STACK_I4;
762 case MONO_TYPE_FNPTR:
763 inst->type = STACK_PTR;
765 case MONO_TYPE_CLASS:
766 case MONO_TYPE_STRING:
767 case MONO_TYPE_OBJECT:
768 case MONO_TYPE_SZARRAY:
769 case MONO_TYPE_ARRAY:
770 inst->type = STACK_OBJ;
774 inst->type = STACK_I8;
777 inst->type = cfg->r4_stack_type;
780 inst->type = STACK_R8;
782 case MONO_TYPE_VALUETYPE:
783 if (type->data.klass->enumtype) {
784 type = mono_class_enum_basetype (type->data.klass);
788 inst->type = STACK_VTYPE;
791 case MONO_TYPE_TYPEDBYREF:
792 inst->klass = mono_defaults.typed_reference_class;
793 inst->type = STACK_VTYPE;
795 case MONO_TYPE_GENERICINST:
796 type = &type->data.generic_class->container_class->byval_arg;
800 g_assert (cfg->gshared);
801 if (mini_is_gsharedvt_type (type)) {
802 g_assert (cfg->gsharedvt);
803 inst->type = STACK_VTYPE;
805 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
809 g_error ("unknown type 0x%02x in eval stack type", type->type);
814 * The following tables are used to quickly validate the IL code in type_from_op ().
817 bin_num_table [STACK_MAX] [STACK_MAX] = {
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
820 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
821 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
823 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
831 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
834 /* reduce the size of this table */
836 bin_int_table [STACK_MAX] [STACK_MAX] = {
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
848 bin_comp_table [STACK_MAX] [STACK_MAX] = {
849 /* Inv i L p F & O vt r4 */
851 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
852 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
853 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
854 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
855 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
856 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
857 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
861 /* reduce the size of this table */
863 shift_table [STACK_MAX] [STACK_MAX] = {
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
875 * Tables to map from the non-specific opcode to the matching
876 * type-specific opcode.
878 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
880 binops_op_map [STACK_MAX] = {
881 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
884 /* handles from CEE_NEG to CEE_CONV_U8 */
886 unops_op_map [STACK_MAX] = {
887 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
890 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
892 ovfops_op_map [STACK_MAX] = {
893 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
896 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
898 ovf2ops_op_map [STACK_MAX] = {
899 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
902 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
904 ovf3ops_op_map [STACK_MAX] = {
905 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
908 /* handles from CEE_BEQ to CEE_BLT_UN */
910 beqops_op_map [STACK_MAX] = {
911 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
914 /* handles from CEE_CEQ to CEE_CLT_UN */
916 ceqops_op_map [STACK_MAX] = {
917 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
921 * Sets ins->type (the type on the eval stack) according to the
922 * type of the opcode and the arguments to it.
923 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
925 * FIXME: this function sets ins->type unconditionally in some cases, but
926 * it should set it to invalid for some types (a conv.x on an object)
929 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
931 switch (ins->opcode) {
938 /* FIXME: check unverifiable args for STACK_MP */
939 ins->type = bin_num_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
947 ins->type = bin_int_table [src1->type] [src2->type];
948 ins->opcode += binops_op_map [ins->type];
953 ins->type = shift_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
959 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
960 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
961 ins->opcode = OP_LCOMPARE;
962 else if (src1->type == STACK_R4)
963 ins->opcode = OP_RCOMPARE;
964 else if (src1->type == STACK_R8)
965 ins->opcode = OP_FCOMPARE;
967 ins->opcode = OP_ICOMPARE;
969 case OP_ICOMPARE_IMM:
970 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE_IMM;
984 ins->opcode += beqops_op_map [src1->type];
987 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
994 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
995 ins->opcode += ceqops_op_map [src1->type];
999 ins->type = neg_table [src1->type];
1000 ins->opcode += unops_op_map [ins->type];
1003 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1004 ins->type = src1->type;
1006 ins->type = STACK_INV;
1007 ins->opcode += unops_op_map [ins->type];
1013 ins->type = STACK_I4;
1014 ins->opcode += unops_op_map [src1->type];
1017 ins->type = STACK_R8;
1018 switch (src1->type) {
1021 ins->opcode = OP_ICONV_TO_R_UN;
1024 ins->opcode = OP_LCONV_TO_R_UN;
1028 case CEE_CONV_OVF_I1:
1029 case CEE_CONV_OVF_U1:
1030 case CEE_CONV_OVF_I2:
1031 case CEE_CONV_OVF_U2:
1032 case CEE_CONV_OVF_I4:
1033 case CEE_CONV_OVF_U4:
1034 ins->type = STACK_I4;
1035 ins->opcode += ovf3ops_op_map [src1->type];
1037 case CEE_CONV_OVF_I_UN:
1038 case CEE_CONV_OVF_U_UN:
1039 ins->type = STACK_PTR;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I1_UN:
1043 case CEE_CONV_OVF_I2_UN:
1044 case CEE_CONV_OVF_I4_UN:
1045 case CEE_CONV_OVF_U1_UN:
1046 case CEE_CONV_OVF_U2_UN:
1047 case CEE_CONV_OVF_U4_UN:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf2ops_op_map [src1->type];
1052 ins->type = STACK_PTR;
1053 switch (src1->type) {
1055 ins->opcode = OP_ICONV_TO_U;
1059 #if SIZEOF_VOID_P == 8
1060 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_MOVE;
1066 ins->opcode = OP_LCONV_TO_U;
1069 ins->opcode = OP_FCONV_TO_U;
1075 ins->type = STACK_I8;
1076 ins->opcode += unops_op_map [src1->type];
1078 case CEE_CONV_OVF_I8:
1079 case CEE_CONV_OVF_U8:
1080 ins->type = STACK_I8;
1081 ins->opcode += ovf3ops_op_map [src1->type];
1083 case CEE_CONV_OVF_U8_UN:
1084 case CEE_CONV_OVF_I8_UN:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf2ops_op_map [src1->type];
1089 ins->type = cfg->r4_stack_type;
1090 ins->opcode += unops_op_map [src1->type];
1093 ins->type = STACK_R8;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1101 ins->type = STACK_I4;
1102 ins->opcode += ovfops_op_map [src1->type];
1105 case CEE_CONV_OVF_I:
1106 case CEE_CONV_OVF_U:
1107 ins->type = STACK_PTR;
1108 ins->opcode += ovfops_op_map [src1->type];
1111 case CEE_ADD_OVF_UN:
1113 case CEE_MUL_OVF_UN:
1115 case CEE_SUB_OVF_UN:
1116 ins->type = bin_num_table [src1->type] [src2->type];
1117 ins->opcode += ovfops_op_map [src1->type];
1118 if (ins->type == STACK_R8)
1119 ins->type = STACK_INV;
1121 case OP_LOAD_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI1_MEMBASE:
1125 case OP_LOADU1_MEMBASE:
1126 case OP_LOADI2_MEMBASE:
1127 case OP_LOADU2_MEMBASE:
1128 case OP_LOADI4_MEMBASE:
1129 case OP_LOADU4_MEMBASE:
1130 ins->type = STACK_PTR;
1132 case OP_LOADI8_MEMBASE:
1133 ins->type = STACK_I8;
1135 case OP_LOADR4_MEMBASE:
1136 ins->type = cfg->r4_stack_type;
1138 case OP_LOADR8_MEMBASE:
1139 ins->type = STACK_R8;
1142 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1146 if (ins->type == STACK_MP)
1147 ins->klass = mono_defaults.object_class;
1152 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1158 param_table [STACK_MAX] [STACK_MAX] = {
1163 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1168 switch (args->type) {
1178 for (i = 0; i < sig->param_count; ++i) {
1179 switch (args [i].type) {
1183 if (!sig->params [i]->byref)
1187 if (sig->params [i]->byref)
1189 switch (sig->params [i]->type) {
1190 case MONO_TYPE_CLASS:
1191 case MONO_TYPE_STRING:
1192 case MONO_TYPE_OBJECT:
1193 case MONO_TYPE_SZARRAY:
1194 case MONO_TYPE_ARRAY:
1201 if (sig->params [i]->byref)
1203 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1212 /*if (!param_table [args [i].type] [sig->params [i]->type])
1220 * When we need a pointer to the current domain many times in a method, we
1221 * call mono_domain_get() once and we store the result in a local variable.
1222 * This function returns the variable that represents the MonoDomain*.
1224 inline static MonoInst *
1225 mono_get_domainvar (MonoCompile *cfg)
1227 if (!cfg->domainvar)
1228 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1229 return cfg->domainvar;
1233 * The got_var contains the address of the Global Offset Table when AOT
1237 mono_get_got_var (MonoCompile *cfg)
1239 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1241 if (!cfg->got_var) {
1242 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1244 return cfg->got_var;
1248 mono_get_vtable_var (MonoCompile *cfg)
1250 g_assert (cfg->gshared);
1252 if (!cfg->rgctx_var) {
1253 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1254 /* force the var to be stack allocated */
1255 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1258 return cfg->rgctx_var;
1262 type_from_stack_type (MonoInst *ins) {
1263 switch (ins->type) {
1264 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1265 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1266 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1267 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1268 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1270 return &ins->klass->this_arg;
1271 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1272 case STACK_VTYPE: return &ins->klass->byval_arg;
1274 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 * Map a MonoType to the evaluation stack type used by the JIT
 * (STACK_I4, STACK_PTR, STACK_OBJ, ...).
 * NOTE(review): several case labels of this switch fall outside this
 * excerpt; the comments below only describe the visible fragments.
 */
1279 static G_GNUC_UNUSED int
1280 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enums etc. down to the underlying type first. */
1282 t = mono_type_get_underlying_type (t);
1294 case MONO_TYPE_FNPTR:
/* All reference types map to the same object stack type. */
1296 case MONO_TYPE_CLASS:
1297 case MONO_TYPE_STRING:
1298 case MONO_TYPE_OBJECT:
1299 case MONO_TYPE_SZARRAY:
1300 case MONO_TYPE_ARRAY:
/* R4 may be widened to STACK_R8 depending on the backend. */
1306 return cfg->r4_stack_type;
1309 case MONO_TYPE_VALUETYPE:
1310 case MONO_TYPE_TYPEDBYREF:
1312 case MONO_TYPE_GENERICINST:
/* Generic instantiations of value types are treated as vtypes. */
1313 if (mono_type_generic_inst_is_valuetype (t))
1319 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 * Return the element class accessed by a CIL ldelem/stelem opcode.
 * NOTE(review): the case labels preceding most returns are outside this
 * excerpt; each return presumably corresponds to one CEE_LDELEM_* /
 * CEE_STELEM_* group (only the REF pair is visible) — confirm against
 * the full file.
 */
1326 array_access_to_klass (int opcode)
1330 return mono_defaults.byte_class;
1332 return mono_defaults.uint16_class;
1335 return mono_defaults.int_class;
1338 return mono_defaults.sbyte_class;
1341 return mono_defaults.int16_class;
1344 return mono_defaults.int32_class;
1346 return mono_defaults.uint32_class;
1349 return mono_defaults.int64_class;
1352 return mono_defaults.single_class;
1355 return mono_defaults.double_class;
1356 case CEE_LDELEM_REF:
1357 case CEE_STELEM_REF:
1358 return mono_defaults.object_class;
/* Any other opcode is not an array access: programmer error. */
1360 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 * Return (or create) the local variable used to carry the value in stack
 * slot SLOT across basic-block boundaries.
 * NOTE(review): parts of the switch are outside this excerpt.
 */
1366 * We try to share variables when possible
1369 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1374 /* inlining can result in deeper stacks */
1375 if (slot >= cfg->header->max_stack)
1376 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1378 pos = ins->type - 1 + slot * STACK_MAX;
1380 switch (ins->type) {
/* Reuse a previously created variable for this slot/type if present. */
1387 if ((vnum = cfg->intvars [pos]))
1388 return cfg->varinfo [vnum];
1389 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1390 cfg->intvars [pos] = res->inst_c0;
/* NOTE(review): presumably the fallback path for uncached stack types — confirm. */
1393 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1399 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1402 * Don't use this if a generic_context is set, since that means AOT can't
1403 * look up the method using just the image+token.
1404 * table == 0 means this is a reference made from a wrapper.
1406 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1407 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1408 jump_info_token->image = image;
1409 jump_info_token->token = token;
1410 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 * NOTE(review): this excerpt is gapped — several structural lines (braces,
 * continue statements, declarations) are missing between the visible lines;
 * comments below describe only what is shown.
 */
1415 * This function is called to handle items that are left on the evaluation stack
1416 * at basic block boundaries. What happens is that we save the values to local variables
1417 * and we reload them later when first entering the target basic block (with the
1418 * handle_loaded_temps () function).
1419 * A single joint point will use the same variables (stored in the array bb->out_stack or
1420 * bb->in_stack, if the basic block is before or after the joint point).
1422 * This function needs to be called _before_ emitting the last instruction of
1423 * the bb (i.e. before emitting a branch).
1424 * If the stack merge fails at a join point, cfg->unverifiable is set.
1427 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1430 MonoBasicBlock *bb = cfg->cbb;
1431 MonoBasicBlock *outb;
1432 MonoInst *inst, **locals;
1437 if (cfg->verbose_level > 3)
1438 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time we leave this bblock with values on the stack: pick/create the out_stack vars. */
1439 if (!bb->out_scount) {
1440 bb->out_scount = count;
1441 //printf ("bblock %d has out:", bb->block_num);
/* If a successor already has an in_stack, share it as our out_stack. */
1443 for (i = 0; i < bb->out_count; ++i) {
1444 outb = bb->out_bb [i];
1445 /* exception handlers are linked, but they should not be considered for stack args */
1446 if (outb->flags & BB_EXCEPTION_HANDLER)
1448 //printf (" %d", outb->block_num);
1449 if (outb->in_stack) {
1451 bb->out_stack = outb->in_stack;
1457 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1458 for (i = 0; i < count; ++i) {
1460 * try to reuse temps already allocated for this purpouse, if they occupy the same
1461 * stack slot and if they are of the same type.
1462 * This won't cause conflicts since if 'local' is used to
1463 * store one of the values in the in_stack of a bblock, then
1464 * the same variable will be used for the same outgoing stack
1466 * This doesn't work when inlining methods, since the bblocks
1467 * in the inlined methods do not inherit their in_stack from
1468 * the bblock they are inlined to. See bug #58863 for an
1471 if (cfg->inlined_method)
1472 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1474 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors as their in_stack; mismatched counts are unverifiable. */
1479 for (i = 0; i < bb->out_count; ++i) {
1480 outb = bb->out_bb [i];
1481 /* exception handlers are linked, but they should not be considered for stack args */
1482 if (outb->flags & BB_EXCEPTION_HANDLER)
1484 if (outb->in_scount) {
1485 if (outb->in_scount != bb->out_scount) {
1486 cfg->unverifiable = TRUE;
1489 continue; /* check they are the same locals */
1491 outb->in_scount = count;
1492 outb->in_stack = bb->out_stack;
1495 locals = bb->out_stack;
/* Spill each stack item into its shared temp and replace it on the stack. */
1497 for (i = 0; i < count; ++i) {
1498 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1499 inst->cil_code = sp [i]->cil_code;
1500 sp [i] = locals [i];
1501 if (cfg->verbose_level > 3)
1502 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1506 * It is possible that the out bblocks already have in_stack assigned, and
1507 * the in_stacks differ. In this case, we will store to all the different
1514 /* Find a bblock which has a different in_stack */
1516 while (bindex < bb->out_count) {
1517 outb = bb->out_bb [bindex];
1518 /* exception handlers are linked, but they should not be considered for stack args */
1519 if (outb->flags & BB_EXCEPTION_HANDLER) {
1523 if (outb->in_stack != locals) {
1524 for (i = 0; i < count; ++i) {
1525 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1526 inst->cil_code = sp [i]->cil_code;
1527 sp [i] = locals [i];
1528 if (cfg->verbose_level > 3)
1529 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1531 locals = outb->in_stack;
1541 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1545 if (cfg->compile_aot) {
1546 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1551 ji.type = patch_type;
1552 ji.data.target = data;
1553 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1555 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit IR loading into INTF_BIT_REG a nonzero value iff the interface
 * bitmap at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * NOTE(review): this excerpt is missing lines of the compressed-bitmap
 * path and the #else/#endif structure; comments describe the visible code.
 */
1561 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1563 int ibitmap_reg = alloc_preg (cfg);
1564 #ifdef COMPRESSED_INTERFACE_BITMAP
1566 MonoInst *res, *ins;
1567 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1568 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmap: defer the test to a runtime icall. */
1570 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1571 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1572 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1574 int ibitmap_byte_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* AOT: interface_id is unknown at compile time, compute byte index and bit mask in IR. */
1578 if (cfg->compile_aot) {
1579 int iid_reg = alloc_preg (cfg);
1580 int shifted_iid_reg = alloc_preg (cfg);
1581 int ibitmap_byte_address_reg = alloc_preg (cfg);
1582 int masked_iid_reg = alloc_preg (cfg);
1583 int iid_one_bit_reg = alloc_preg (cfg);
1584 int iid_bit_reg = alloc_preg (cfg);
1585 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1587 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1590 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1591 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1592 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, fold index and mask. */
1594 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1601 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1602 * stored in "klass_reg" implements the interface "klass".
1605 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1607 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1611 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1612 * stored in "vtable_reg" implements the interface "klass".
1615 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1617 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1621 * Emit code which checks whenever the interface id of @klass is smaller than
1622 * than the value given by max_iid_reg.
1625 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1626 MonoBasicBlock *false_target)
1628 if (cfg->compile_aot) {
1629 int iid_reg = alloc_preg (cfg);
1630 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1631 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1641 /* Same as above, but obtains max_iid from a vtable */
1643 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1644 MonoBasicBlock *false_target)
1646 int max_iid_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1649 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1652 /* Same as above, but obtains max_iid from a klass */
1654 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1655 MonoBasicBlock *false_target)
1657 int max_iid_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1660 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1664 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1666 int idepth_reg = alloc_preg (cfg);
1667 int stypes_reg = alloc_preg (cfg);
1668 int stype = alloc_preg (cfg);
1670 mono_class_setup_supertypes (klass);
1672 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1673 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1675 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1680 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1681 } else if (cfg->compile_aot) {
1682 int const_reg = alloc_preg (cfg);
1683 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1684 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1692 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1694 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1698 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1700 int intf_reg = alloc_preg (cfg);
1702 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1703 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1706 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1708 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1712 * Variant of the above that takes a register to the class, not the vtable.
1715 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1717 int intf_bit_reg = alloc_preg (cfg);
1719 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1720 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1725 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1729 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1732 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1734 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1735 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1737 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1741 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1743 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1747 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1749 if (cfg->compile_aot) {
1750 int const_reg = alloc_preg (cfg);
1751 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1752 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1756 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below is mutually recursive with this. */
1760 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure.
 * NOTE(review): this excerpt is gapped — the array/non-array branch
 * structure and several closing braces are missing between visible lines.
 */
1763 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
/* Array case: check rank, then recursively check the element class. */
1766 int rank_reg = alloc_preg (cfg);
1767 int eclass_reg = alloc_preg (cfg);
1769 g_assert (!klass_inst);
1770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1771 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1772 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1773 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes related to System.Enum/System.Object. */
1775 if (klass->cast_class == mono_defaults.object_class) {
1776 int parent_reg = alloc_preg (cfg);
1777 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1778 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1779 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1780 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1781 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1782 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1783 } else if (klass->cast_class == mono_defaults.enum_class) {
1784 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1785 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1786 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1788 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1789 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1792 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1793 /* Check that the object is a vector too */
1794 int bounds_reg = alloc_preg (cfg);
1795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1797 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table, like mini_emit_isninst_cast_inst. */
1800 int idepth_reg = alloc_preg (cfg);
1801 int stypes_reg = alloc_preg (cfg);
1802 int stype = alloc_preg (cfg);
1804 mono_class_setup_supertypes (klass);
1806 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1809 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1813 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1818 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1820 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit IR zeroing SIZE bytes at DESTREG+OFFSET (only VAL == 0 is supported).
 * NOTE(review): this excerpt is gapped — the size-switch case labels, the
 * store loops and #endif lines are missing between visible lines.
 */
1824 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1828 g_assert (val == 0);
/* Small, aligned sizes: a single immediate store of the right width. */
1833 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1842 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1844 #if SIZEOF_REGISTER == 8
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value in a register and store in chunks. */
1852 val_reg = alloc_preg (cfg);
1854 if (SIZEOF_REGISTER == 8)
1855 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1857 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until aligned. */
1860 /* This could be optimized further if neccesary */
1862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1869 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: descending store widths. */
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET with
 * inline load/store pairs, choosing the widest width allowed by ALIGN.
 * NOTE(review): this excerpt is gapped — the loop headers and
 * offset/size bookkeeping lines are missing between visible lines.
 */
1900 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1907 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1908 g_assert (size < 10000);
/* Unaligned prefix: copy byte by byte until aligned. */
1911 /* This could be optimized further if neccesary */
1913 cur_reg = alloc_preg (cfg);
1914 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1915 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the backend allows (possibly unaligned) wide access. */
1922 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1924 cur_reg = alloc_preg (cfg);
1925 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1926 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-byte chunks... */
1934 cur_reg = alloc_preg (cfg);
1935 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...2-byte chunks... */
1942 cur_reg = alloc_preg (cfg);
1943 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1944 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...and a final byte tail. */
1950 cur_reg = alloc_preg (cfg);
1951 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1952 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 * Emit IR storing the value in SREG1 into the TLS slot TLS_KEY.
 * NOTE(review): the declarations of ins/c and the ins->sreg1 assignments
 * fall outside this excerpt.
 */
1960 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
/* AOT: the TLS offset is unknown at compile time, pass it via a patched constant. */
1964 if (cfg->compile_aot) {
1965 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1966 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1968 ins->sreg2 = c->dreg;
1969 MONO_ADD_INS (cfg->cbb, ins);
/* JIT: the offset can be embedded directly into the instruction. */
1971 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1973 ins->inst_offset = mini_get_tls_offset (tls_key);
1974 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 * NOTE(review): this excerpt is gapped — the #ifdef structure selecting
 * between the TLS/intrinsic/icall paths is partially missing; comments
 * below only describe the visible code.
 */
1981 * Emit IR to push the current LMF onto the LMF stack.
1984 emit_push_lmf (MonoCompile *cfg)
1987 * Emit IR to push the LMF:
1988 * lmf_addr = <lmf_addr from tls>
1989 * lmf->lmf_addr = lmf_addr
1990 * lmf->prev_lmf = *lmf_addr
1993 int lmf_reg, prev_lmf_reg;
1994 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF lives directly in TLS. */
1999 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2000 /* Load current lmf */
2001 lmf_ins = mono_get_lmf_intrinsic (cfg);
2003 MONO_ADD_INS (cfg->cbb, lmf_ins);
2004 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2005 lmf_reg = ins->dreg;
2006 /* Save previous_lmf */
2007 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2009 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2012 * Store lmf_addr in a variable, so it can be allocated to a global register.
2014 if (!cfg->lmf_addr_var)
2015 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Compute lmf_addr from the jit_tls structure when available. */
2018 ins = mono_get_jit_tls_intrinsic (cfg);
2020 int jit_tls_dreg = ins->dreg;
2022 MONO_ADD_INS (cfg->cbb, ins);
2023 lmf_reg = alloc_preg (cfg);
2024 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2026 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2029 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2031 MONO_ADD_INS (cfg->cbb, lmf_ins);
2034 MonoInst *args [16], *jit_tls_ins, *ins;
2036 /* Inline mono_get_lmf_addr () */
2037 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2039 /* Load mono_jit_tls_id */
2040 if (cfg->compile_aot)
2041 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2043 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2044 /* call pthread_getspecific () */
2045 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2046 /* lmf_addr = &jit_tls->lmf */
2047 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: plain icall to mono_get_lmf_addr. */
2050 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2054 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2056 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2057 lmf_reg = ins->dreg;
/* Link the new LMF into the list: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2059 prev_lmf_reg = alloc_preg (cfg);
2060 /* Save previous_lmf */
2061 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2062 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2064 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 * NOTE(review): this excerpt is gapped — the declarations of ins and the
 * branch structure between the two paths are partially missing.
 */
2071 * Emit IR to pop the current LMF from the LMF stack.
2074 emit_pop_lmf (MonoCompile *cfg)
2076 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2082 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2083 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf straight into the TLS LMF slot. */
2085 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2086 /* Load previous_lmf */
2087 prev_lmf_reg = alloc_preg (cfg);
2088 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2090 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2093 * Emit IR to pop the LMF:
2094 * *(lmf->lmf_addr) = lmf->prev_lmf
2096 /* This could be called before emit_push_lmf () */
2097 if (!cfg->lmf_addr_var)
2098 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2099 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Unlink: load previous_lmf from the current LMF and store it back through lmf_addr. */
2101 prev_lmf_reg = alloc_preg (cfg);
2102 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2103 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2108 emit_instrumentation_call (MonoCompile *cfg, void *func)
2110 MonoInst *iargs [1];
2113 * Avoid instrumenting inlined methods since it can
2114 * distort profiling results.
2116 if (cfg->method != cfg->current_method)
2119 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2120 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2121 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 * Pick the call IR opcode family (void/int/long/float/r4/vtype) for a call
 * returning TYPE; CALLI selects the _REG form, VIRT the _MEMBASE form.
 * NOTE(review): several case labels of this switch fall outside this excerpt.
 */
2126 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2129 type = mini_get_underlying_type (type);
2130 switch (type->type) {
2131 case MONO_TYPE_VOID:
2132 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2139 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2143 case MONO_TYPE_FNPTR:
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2145 case MONO_TYPE_CLASS:
2146 case MONO_TYPE_STRING:
2147 case MONO_TYPE_OBJECT:
2148 case MONO_TYPE_SZARRAY:
2149 case MONO_TYPE_ARRAY:
2150 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2153 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2156 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2158 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2160 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2161 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying type. */
2162 if (type->data.klass->enumtype) {
2163 type = mono_class_enum_basetype (type->data.klass);
2166 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2167 case MONO_TYPE_TYPEDBYREF:
2168 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
/* Generic instances: re-dispatch on the container class type. */
2169 case MONO_TYPE_GENERICINST:
2170 type = &type->data.generic_class->container_class->byval_arg;
2173 case MONO_TYPE_MVAR:
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2177 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * NOTE(review): this excerpt is gapped — the return statements after most
 * comparisons and several closing braces are missing between visible lines.
 */
2183 * target_type_is_incompatible:
2184 * @cfg: MonoCompile context
2186 * Check that the item @arg on the evaluation stack can be stored
2187 * in the target type (can be a local, or field, etc).
2188 * The cfg arg can be used to check if we need verification or just
2191 * Returns: non-0 value if arg can't be stored on a target.
2194 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2196 MonoType *simple_type;
/* Byref targets accept managed pointers (with class match) and native pointers. */
2199 if (target->byref) {
2200 /* FIXME: check that the pointed to types match */
2201 if (arg->type == STACK_MP) {
2202 MonoClass *base_class = mono_class_from_mono_type (target);
2203 /* This is needed to handle gshared types + ldaddr */
2204 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2205 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2207 if (arg->type == STACK_PTR)
/* Non-byref targets: dispatch on the underlying type. */
2212 simple_type = mini_get_underlying_type (target);
2213 switch (simple_type->type) {
2214 case MONO_TYPE_VOID:
2222 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2226 /* STACK_MP is needed when setting pinned locals */
2227 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2232 case MONO_TYPE_FNPTR:
2234 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2235 * in native int. (#688008).
2237 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_CLASS:
2241 case MONO_TYPE_STRING:
2242 case MONO_TYPE_OBJECT:
2243 case MONO_TYPE_SZARRAY:
2244 case MONO_TYPE_ARRAY:
2245 if (arg->type != STACK_OBJ)
2247 /* FIXME: check type compatibility */
2251 if (arg->type != STACK_I8)
2255 if (arg->type != cfg->r4_stack_type)
2259 if (arg->type != STACK_R8)
/* Value types require both the stack type and the exact class to match. */
2262 case MONO_TYPE_VALUETYPE:
2263 if (arg->type != STACK_VTYPE)
2265 klass = mono_class_from_mono_type (simple_type);
2266 if (klass != arg->klass)
2269 case MONO_TYPE_TYPEDBYREF:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_GENERICINST:
2277 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2278 MonoClass *target_class;
2279 if (arg->type != STACK_VTYPE)
2281 klass = mono_class_from_mono_type (simple_type);
2282 target_class = mono_class_from_mono_type (target);
2283 /* The second cases is needed when doing partial sharing */
2284 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2288 if (arg->type != STACK_OBJ)
2290 /* FIXME: check type compatibility */
/* Type variables only occur under generic sharing. */
2294 case MONO_TYPE_MVAR:
2295 g_assert (cfg->gshared);
2296 if (mini_type_var_is_vt (simple_type)) {
2297 if (arg->type != STACK_VTYPE)
2300 if (arg->type != STACK_OBJ)
2305 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Return non-zero when the stack items in ARGS cannot be passed to SIG.
 * NOTE(review): this excerpt is gapped — the return statements after most
 * comparisons, the `this` handling and several case labels are missing.
 */
2311 * Prepare arguments for passing to a function call.
2312 * Return a non-zero value if the arguments can't be passed to the given
2314 * The type checks are not yet complete and some conversions may need
2315 * casts on 32 or 64 bit architectures.
2317 * FIXME: implement this using target_type_is_incompatible ()
2320 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2322 MonoType *simple_type;
/* `this` must be an object, managed pointer or native pointer. */
2326 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2330 for (i = 0; i < sig->param_count; ++i) {
2331 if (sig->params [i]->byref) {
2332 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2336 simple_type = mini_get_underlying_type (sig->params [i]);
2338 switch (simple_type->type) {
2339 case MONO_TYPE_VOID:
2348 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2354 case MONO_TYPE_FNPTR:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2358 case MONO_TYPE_CLASS:
2359 case MONO_TYPE_STRING:
2360 case MONO_TYPE_OBJECT:
2361 case MONO_TYPE_SZARRAY:
2362 case MONO_TYPE_ARRAY:
2363 if (args [i]->type != STACK_OBJ)
2368 if (args [i]->type != STACK_I8)
2372 if (args [i]->type != cfg->r4_stack_type)
2376 if (args [i]->type != STACK_R8)
2379 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying type. */
2380 if (simple_type->data.klass->enumtype) {
2381 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2384 if (args [i]->type != STACK_VTYPE)
2387 case MONO_TYPE_TYPEDBYREF:
2388 if (args [i]->type != STACK_VTYPE)
2391 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container class. */
2392 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2395 case MONO_TYPE_MVAR:
2397 if (args [i]->type != STACK_VTYPE)
2401 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map a *_MEMBASE (virtual) call opcode to the plain direct-call opcode.
 * NOTE(review): the return statements between the case labels are outside
 * this excerpt.
 */
2409 callvirt_to_call (int opcode)
2412 case OP_CALL_MEMBASE:
2414 case OP_VOIDCALL_MEMBASE:
2416 case OP_FCALL_MEMBASE:
2418 case OP_RCALL_MEMBASE:
2420 case OP_VCALL_MEMBASE:
2422 case OP_LCALL_MEMBASE:
/* Any other opcode is not a membase call. */
2425 g_assert_not_reached ();
2432 callvirt_to_call_reg (int opcode)
2435 case OP_CALL_MEMBASE:
2437 case OP_VOIDCALL_MEMBASE:
2438 return OP_VOIDCALL_REG;
2439 case OP_FCALL_MEMBASE:
2440 return OP_FCALL_REG;
2441 case OP_RCALL_MEMBASE:
2442 return OP_RCALL_REG;
2443 case OP_VCALL_MEMBASE:
2444 return OP_VCALL_REG;
2445 case OP_LCALL_MEMBASE:
2446 return OP_LCALL_REG;
2448 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 * Materialize the IMT argument (either the target METHOD constant or the
 * runtime value in IMT_ARG) and attach it to CALL.
 * NOTE(review): this excerpt is gapped — the #ifdef MONO_ARCH_IMT_REG
 * structure separating the LLVM and non-LLVM paths is partially missing.
 */
2454 /* Either METHOD or IMT_ARG needs to be set */
2456 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM path: record the register in call->imt_arg_reg. */
2460 if (COMPILE_LLVM (cfg)) {
2462 method_reg = alloc_preg (cfg);
2463 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2465 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2466 method_reg = ins->dreg;
2470 call->imt_arg_reg = method_reg;
2472 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: pass the value in the architecture's IMT register. */
2477 method_reg = alloc_preg (cfg);
2478 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2480 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2481 method_reg = ins->dreg;
2484 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill in
 *   its target.  NOTE(review): the assignments of ji->type and ji->ip are not
 *   visible in this listing — presumably set from TYPE and IP; confirm
 *   against the full file.
 */
2487 static MonoJumpInfo *
2488 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2490 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2494 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Return the generic-context-usage flags for KLASS.  Thin wrapper around
 *   mono_class_check_context_used; cfg presumably gates this on
 *   cfg->gshared in the elided lines — confirm against the full file.
 */
2500 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2503 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Return the generic-context-usage flags for METHOD.  Thin wrapper around
 *   mono_method_check_context_used; cfg presumably gates this on
 *   cfg->gshared in the elided lines — confirm against the full file.
 */
2509 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2512 return mono_method_check_context_used (method);
2518 * check_method_sharing:
2520 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (either may be NULL):
 *   *out_pass_vtable — TRUE when the callee is static or on a valuetype in a
 *     generic class and might be compiled as shared code, so it needs its
 *     class vtable as a hidden argument.
 *   *out_pass_mrgctx — TRUE when the callee has a method instantiation and is
 *     sharable (or the compile is gsharedvt over a gsharedvt signature), so it
 *     needs a method runtime generic context instead.
 * The two are mutually exclusive (asserted below).
 */
2523 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2525 gboolean pass_vtable = FALSE;
2526 gboolean pass_mrgctx = FALSE;
2528 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2529 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2530 gboolean sharable = FALSE;
2532 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2536 * Pass vtable iff target method might
2537 * be shared, which means that sharing
2538 * is enabled for its class and its
2539 * context is sharable (and it's not a
/* ...and the method has no method-level instantiation of its own. */
2542 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic method instantiation: mrgctx path. */
2546 if (mini_method_get_context (cmethod) &&
2547 mini_method_get_context (cmethod)->method_inst) {
2548 g_assert (!pass_vtable);
2550 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2553 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2558 if (out_pass_vtable)
2559 *out_pass_vtable = pass_vtable;
2560 if (out_pass_mrgctx)
2561 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   lower the return value handling, and run the arch/LLVM outgoing-argument
 *   lowering.  The call instruction is NOT added to a basic block here; the
 *   caller does that after filling in the call target.
 *
 *   CALLI / VIRTUAL_ / TAIL / RGCTX / UNBOX_TRAMPOLINE select the opcode and
 *   calling details.  Returns the new call instruction.
 */
2564 inline static MonoCallInst *
2565 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2566 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2570 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the frame, so emit the profiler leave event first. */
2578 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2580 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2582 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2585 call->signature = sig;
2586 call->rgctx_reg = rgctx;
2587 sig_ret = mini_get_underlying_type (sig->ret);
2589 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return with a caller-provided vret buffer (first branch);
 * presumably guarded by sig->ret_var_is_local or similar in the elided
 * condition — confirm against the full file. */
2592 if (mini_type_is_vtype (sig_ret)) {
2593 call->vret_var = cfg->vret_addr;
2594 //g_assert_not_reached ();
/* Valuetype return without a pre-existing vret var: allocate a temp
 * and emit an OP_OUTARG_VTRETADDR pointing at it. */
2596 } else if (mini_type_is_vtype (sig_ret)) {
2597 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2600 temp->backend.is_pinvoke = sig->pinvoke;
2603 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2604 * address of return value to increase optimization opportunities.
2605 * Before vtype decomposition, the dreg of the call ins itself represents the
2606 * fact the call modifies the return value. After decomposition, the call will
2607 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2608 * will be transformed into an LDADDR.
2610 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2611 loada->dreg = alloc_preg (cfg);
2612 loada->inst_p0 = temp;
2613 /* We reference the call too since call->dreg could change during optimization */
2614 loada->inst_p1 = call;
2615 MONO_ADD_INS (cfg->cbb, loada);
2617 call->inst.dreg = temp->dreg;
2619 call->vret_var = loada;
2620 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2621 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2623 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2624 if (COMPILE_SOFT_FLOAT (cfg)) {
2626 * If the call has a float argument, we would need to do an r8->r4 conversion using
2627 * an icall, but that cannot be done during the call sequence since it would clobber
2628 * the call registers + the stack. So we do it before emitting the call.
2630 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2632 MonoInst *in = call->args [i];
2634 if (i >= sig->hasthis)
2635 t = sig->params [i - sig->hasthis];
/* Implicit 'this' argument is pointer-sized. */
2637 t = &mono_defaults.int_class->byval_arg;
2638 t = mono_type_get_underlying_type (t);
2640 if (!t->byref && t->type == MONO_TYPE_R4) {
2641 MonoInst *iargs [1];
2645 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2647 /* The result will be in an int vreg */
2648 call->args [i] = conv;
2654 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend lower the outgoing arguments. */
2657 if (COMPILE_LLVM (cfg))
2658 mono_llvm_emit_call (cfg, call);
2660 mono_arch_emit_call (cfg, call);
2662 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2665 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2666 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Bind the rgctx value held in RGCTX_REG to CALL: route it through the
 *   dedicated MONO_ARCH_RGCTX_REG, mark the compile as using that register,
 *   and record the vreg for the LLVM backend.
 */
2672 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2674 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2675 cfg->uses_rgctx_reg = TRUE;
2676 call->rgctx_reg = TRUE;
2678 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 *   IMT_ARG and RGCTX_ARG, when non-NULL, are passed as the hidden IMT and
 *   rgctx arguments.  For pinvoke wrappers compiled with
 *   check_pinvoke_callconv, the stack pointer is captured before the call and
 *   compared after it, throwing ExecutionEngineException on a calling-
 *   convention mismatch.  Returns the call instruction.
 */
2682 inline static MonoInst*
2683 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2688 gboolean check_sp = FALSE;
2690 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2691 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2693 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg before argument lowering. */
2698 rgctx_reg = mono_alloc_preg (cfg);
2699 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Save the pre-call stack pointer into a shared local. */
2703 if (!cfg->stack_inbalance_var)
2704 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2706 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2707 ins->dreg = cfg->stack_inbalance_var->dreg;
2708 MONO_ADD_INS (cfg->cbb, ins);
2711 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The indirect target address goes in sreg1. */
2713 call->inst.sreg1 = addr->dreg;
2716 emit_imt_argument (cfg, call, NULL, imt_arg);
2718 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call: read SP again and verify it matches the saved value. */
2723 sp_reg = mono_alloc_preg (cfg);
2725 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2727 MONO_ADD_INS (cfg->cbb, ins);
2729 /* Restore the stack so we don't crash when throwing the exception */
2730 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2731 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2732 MONO_ADD_INS (cfg->cbb, ins);
2734 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2735 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2739 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2741 return (MonoInst*)call;
2745 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2748 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2750 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2753 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2754 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2756 #ifndef DISABLE_REMOTING
2757 gboolean might_be_remote = FALSE;
2759 gboolean virtual_ = this_ins != NULL;
2760 gboolean enable_for_aot = TRUE;
2763 MonoInst *call_target = NULL;
2765 gboolean need_unbox_trampoline;
2768 sig = mono_method_signature (method);
2770 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2771 g_assert_not_reached ();
2774 rgctx_reg = mono_alloc_preg (cfg);
2775 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2778 if (method->string_ctor) {
2779 /* Create the real signature */
2780 /* FIXME: Cache these */
2781 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2782 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2787 context_used = mini_method_check_context_used (cfg, method);
2789 #ifndef DISABLE_REMOTING
2790 might_be_remote = this_ins && sig->hasthis &&
2791 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2792 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2794 if (might_be_remote && context_used) {
2797 g_assert (cfg->gshared);
2799 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2801 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2805 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2806 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2808 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2810 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2812 #ifndef DISABLE_REMOTING
2813 if (might_be_remote)
2814 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2817 call->method = method;
2818 call->inst.flags |= MONO_INST_HAS_METHOD;
2819 call->inst.inst_left = this_ins;
2820 call->tail_call = tail;
2823 int vtable_reg, slot_reg, this_reg;
2826 this_reg = this_ins->dreg;
2828 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2829 MonoInst *dummy_use;
2831 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2833 /* Make a call to delegate->invoke_impl */
2834 call->inst.inst_basereg = this_reg;
2835 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2836 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2838 /* We must emit a dummy use here because the delegate trampoline will
2839 replace the 'this' argument with the delegate target making this activation
2840 no longer a root for the delegate.
2841 This is an issue for delegates that target collectible code such as dynamic
2842 methods of GC'able assemblies.
2844 For a test case look into #667921.
2846 FIXME: a dummy use is not the best way to do it as the local register allocator
2847 will put it on a caller save register and spil it around the call.
2848 Ideally, we would either put it on a callee save register or only do the store part.
2850 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2852 return (MonoInst*)call;
2855 if ((!cfg->compile_aot || enable_for_aot) &&
2856 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2857 (MONO_METHOD_IS_FINAL (method) &&
2858 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2859 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2861 * the method is not virtual, we just need to ensure this is not null
2862 * and then we can call the method directly.
2864 #ifndef DISABLE_REMOTING
2865 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2867 * The check above ensures method is not gshared, this is needed since
2868 * gshared methods can't have wrappers.
2870 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2874 if (!method->string_ctor)
2875 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2877 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2878 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2880 * the method is virtual, but we can statically dispatch since either
2881 * it's class or the method itself are sealed.
2882 * But first we need to ensure it's not a null reference.
2884 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2886 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2887 } else if (call_target) {
2888 vtable_reg = alloc_preg (cfg);
2889 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2891 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2892 call->inst.sreg1 = call_target->dreg;
2893 call->inst.flags &= !MONO_INST_HAS_METHOD;
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2897 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2898 guint32 imt_slot = mono_method_get_imt_slot (method);
2899 emit_imt_argument (cfg, call, call->method, imt_arg);
2900 slot_reg = vtable_reg;
2901 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2903 slot_reg = vtable_reg;
2904 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2905 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2907 g_assert (mono_method_signature (method)->generic_param_count);
2908 emit_imt_argument (cfg, call, call->method, imt_arg);
2912 call->inst.sreg1 = slot_reg;
2913 call->inst.inst_offset = offset;
2914 call->is_virtual = TRUE;
2918 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2921 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2923 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: call METHOD with its own signature, no tail call,
 *   no IMT or rgctx argument.
 */
2927 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2929 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG
 *   (non-calli, non-virtual, no tail call, no rgctx, no unbox trampoline) and
 *   add it to the current basic block.  The assignment of FUNC to the call's
 *   fptr happens on lines elided from this listing — confirm against the
 *   full file.
 */
2933 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2940 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2943 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2945 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for the C function FUNC, going
 *   through its exception-handling wrapper.
 */
2949 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2951 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2955 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2959 * mono_emit_abs_call:
2961 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the "address"; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can recognize it and patch
 * in the real target at code-emission time.
 */
2963 inline static MonoInst*
2964 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2965 MonoMethodSignature *sig, MonoInst **args)
2967 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2971 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table; keyed and valued by the patch itself. */
2974 if (cfg->abs_patches == NULL)
2975 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2976 g_hash_table_insert (cfg->abs_patches, ji, ji);
2977 ins = mono_emit_native_call (cfg, ji, sig, args);
2978 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Return a copy of SIG with one extra trailing pointer-sized parameter,
 *   used for calls that pass a hidden rgctx/extra argument.
 *   NOTE(review): allocated with g_malloc and apparently never freed — the
 *   existing FIXME below acknowledges the leak.
 */
2982 static MonoMethodSignature*
2983 sig_to_rgctx_sig (MonoMethodSignature *sig)
2985 // FIXME: memory allocation
2986 MonoMethodSignature *res;
2989 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2990 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2991 res->param_count = sig->param_count + 1;
2992 for (i = 0; i < sig->param_count; ++i)
2993 res->params [i] = sig->params [i];
/* Extra arg is pointer-sized (int_class->this_arg). */
2994 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2998 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Build a new argument array = [this?] + params + extra arg (the value in
 *   ARG_REG), extend FSIG with one trailing pointer parameter, and emit an
 *   indirect call to CALL_TARGET.  Uses a stack buffer for small signatures,
 *   the mempool otherwise.
 */
3000 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3002 MonoMethodSignature *csig;
3003 MonoInst *args_buf [16];
3005 int i, pindex, tmp_reg;
3007 /* Make a call with an rgctx/extra arg */
3008 if (fsig->param_count + 2 < 16)
3011 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3014 args [pindex ++] = orig_args [0];
3015 for (i = 0; i < fsig->param_count; ++i)
3016 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument, copied into its own vreg. */
3017 tmp_reg = alloc_preg (cfg);
3018 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3019 csig = sig_to_rgctx_sig (fsig);
3020 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3023 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *   llvm-only mode represents callable addresses as <addr, arg> function
 *   descriptors.  Load both fields and call addr with arg appended as the
 *   hidden extra argument.
 */
3025 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3027 int addr_reg, arg_reg;
3028 MonoInst *call_target;
3030 g_assert (cfg->llvm_only);
3033 * addr points to a <addr, arg> pair, load both of them, and
3034 * make a call to addr, passing arg as an extra arg.
3036 addr_reg = alloc_preg (cfg);
3037 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3038 arg_reg = alloc_preg (cfg);
3039 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3041 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly, without their managed wrapper.
 *   Disabled under LLVM (non-32-bit address limitation on amd64, per the
 *   comment below), when emitting sdb sequence points, or when explicitly
 *   turned off.  The return statements are on lines elided from this listing.
 */
3045 direct_icalls_enabled (MonoCompile *cfg)
3047 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3049 if (cfg->compile_llvm)
3052 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot raise
 *   and direct icalls are enabled, the (lazily created) wrapper method is
 *   inlined instead of called, avoiding the wrapper call overhead; otherwise
 *   fall back to a native call through the wrapper.
 */
3058 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3061 * Call the jit icall without a wrapper if possible.
3062 * The wrapper is needed for the following reasons:
3063 * - to handle exceptions thrown using mono_raise_exceptions () from the
3064 * icall function. The EH code needs the lmf frame pushed by the
3065 * wrapper to be able to unwind back to managed code.
3066 * - to be able to do stack walks for asynchronously suspended
3067 * threads when debugging.
3069 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and publish the wrapper; the barrier orders the store
 * against other threads reading info->wrapper_method. */
3073 if (!info->wrapper_method) {
3074 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3075 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3077 mono_memory_barrier ();
3081 * Inline the wrapper method, which is basically a call to the C icall, and
3082 * an exception check.
3084 costs = inline_method (cfg, info->wrapper_method, NULL,
3085 args, NULL, cfg->real_offset, TRUE);
3086 g_assert (costs > 0);
3087 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3091 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend the result INS of a call with signature FSIG when the
 *   callee may leave the upper bits of a small integer return undefined
 *   (pinvoke, or LLVM-compiled code).  Returns the (possibly new) result
 *   instruction; the fall-through return of the unmodified INS is on lines
 *   elided from this listing.
 */
3096 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3098 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3099 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3103 * Native code might return non register sized integers
3104 * without initializing the upper bits.
3106 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3107 case OP_LOADI1_MEMBASE:
3108 widen_op = OP_ICONV_TO_I1;
3110 case OP_LOADU1_MEMBASE:
3111 widen_op = OP_ICONV_TO_U1;
3113 case OP_LOADI2_MEMBASE:
3114 widen_op = OP_ICONV_TO_I2;
3116 case OP_LOADU2_MEMBASE:
3117 widen_op = OP_ICONV_TO_U2;
/* Emit the conversion and carry the original stack type over. */
3123 if (widen_op != -1) {
3124 int dreg = alloc_preg (cfg);
3127 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3128 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the managed String.memcpy(dst, src, n) helper from corlib, cached
 *   in a static.  Aborts if corlib lacks it.
 *   NOTE(review): the lazy init is not obviously thread-safe; presumably
 *   benign here because lookups are idempotent — confirm.
 */
3138 get_memcpy_method (void)
3140 static MonoMethod *memcpy_method = NULL;
3141 if (!memcpy_method) {
3142 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3144 g_error ("Old corlib found. Install a new one");
3146 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively fill *WB_BITMAP with one bit per pointer-sized slot of KLASS
 *   (at byte OFFSET from the start) that holds a GC reference, so the caller
 *   can emit precise write barriers.  Static fields are skipped; nested
 *   valuetypes with references are recursed into.
 */
3150 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3152 MonoClassField *field;
3153 gpointer iter = NULL;
3155 while ((field = mono_class_get_fields (klass, &iter))) {
3158 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it. */
3160 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3161 if (mini_type_is_reference (mono_field_get_type (field))) {
/* References must be pointer-aligned for the per-slot bitmap. */
3162 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3163 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3165 MonoClass *field_class = mono_class_from_mono_type (field->type);
3166 if (field_class->has_references)
3167 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Three strategies,
 *   fastest first: a backend OP_CARD_TABLE_WBARRIER, inline card-table
 *   marking (shift, mask, store 1 into the card byte), or a call to the
 *   generic managed write-barrier method.  No-op unless the compile has
 *   gen_write_barriers set.
 */
3173 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3175 int card_table_shift_bits;
3176 gpointer card_table_mask;
3178 MonoInst *dummy_use;
3179 int nursery_shift_bits;
3180 size_t nursery_size;
3182 if (!cfg->gen_write_barriers)
3185 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3187 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3189 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3192 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3193 wbarrier->sreg1 = ptr->dreg;
3194 wbarrier->sreg2 = value->dreg;
3195 MONO_ADD_INS (cfg->cbb, wbarrier);
3196 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3197 int offset_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3202 if (card_table_mask)
3203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3205 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3206 * IMM's larger than 32bits.
3208 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3209 card_reg = ins->dreg;
3211 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty. */
3212 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3214 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3215 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3218 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of type
 *   KLASS from iargs[1] to iargs[0] (SIZE bytes, alignment ALIGN).  Small
 *   copies are unrolled pointer-by-pointer with a barrier after each
 *   reference slot; larger copies fall back to the
 *   mono_gc_wbarrier_value_copy_bitmap icall with a per-slot reference
 *   bitmap.  The bail-out/return statements are on lines elided from this
 *   listing — presumably returns FALSE when it cannot handle the copy.
 */
3222 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3224 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3225 unsigned need_wb = 0;
3230 /*types with references can't have alignment smaller than sizeof(void*) */
3231 if (align < SIZEOF_VOID_P)
3234 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3235 if (size > 32 * SIZEOF_VOID_P)
3238 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3240 /* We don't unroll more than 5 stores to avoid code bloat. */
3241 if (size > 5 * SIZEOF_VOID_P) {
3242 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3243 size += (SIZEOF_VOID_P - 1);
3244 size &= ~(SIZEOF_VOID_P - 1);
3246 EMIT_NEW_ICONST (cfg, iargs [2], size);
3247 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3248 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Unrolled path: copy pointer-sized chunks, barrier where needed. */
3252 destreg = iargs [0]->dreg;
3253 srcreg = iargs [1]->dreg;
3256 dest_ptr_reg = alloc_preg (cfg);
3257 tmp_reg = alloc_preg (cfg);
3260 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3262 while (size >= SIZEOF_VOID_P) {
3263 MonoInst *load_inst;
3264 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3265 load_inst->dreg = tmp_reg;
3266 load_inst->inst_basereg = srcreg;
3267 load_inst->inst_offset = offset;
3268 MONO_ADD_INS (cfg->cbb, load_inst);
3270 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Slot holds a reference (per need_wb): emit the barrier. */
3273 emit_write_barrier (cfg, iargs [0], load_inst);
3275 offset += SIZEOF_VOID_P;
3276 size -= SIZEOF_VOID_P;
3279 /*tmp += sizeof (void*)*/
3280 if (size >= SIZEOF_VOID_P) {
3281 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3282 MONO_ADD_INS (cfg->cbb, iargs [0]);
3286 /* Those cannot be references since size < sizeof (void*) */
3288 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3289 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3312 * Emit code to copy a valuetype of type @klass whose address is stored in
3313 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native (marshalled) layout/size.  Strategy, in order:
 * inline wb-aware copy, mono_value_copy / mono_gsharedvt_value_copy icall
 * when write barriers are needed, inline memcpy for small sizes, and finally
 * the managed memcpy helper (indirect for gsharedvt, direct otherwise).
 */
3316 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3318 MonoInst *iargs [4];
3321 MonoMethod *memcpy_method;
3322 MonoInst *size_ins = NULL;
3323 MonoInst *memcpy_ins = NULL;
3327 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3330 * This check breaks with spilled vars... need to handle it during verification anyway.
3331 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime. */
3334 if (mini_is_gsharedvt_klass (klass)) {
3336 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3337 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3341 n = mono_class_native_size (klass, &align);
3343 n = mono_class_value_size (klass, &align);
3345 /* if native is true there should be no references in the struct */
3346 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3347 /* Avoid barriers when storing to the stack */
3348 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3349 (dest->opcode == OP_LDADDR))) {
3355 context_used = mini_class_check_context_used (cfg, klass);
3357 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3358 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3360 } else if (context_used) {
3361 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3363 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3364 if (!cfg->compile_aot)
3365 mono_class_compute_gc_descriptor (klass);
3369 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3371 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: plain memory copy. */
3376 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3377 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3378 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3383 iargs [2] = size_ins;
3385 EMIT_NEW_ICONST (cfg, iargs [2], n);
3387 memcpy_method = get_memcpy_method ();
3389 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3391 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the managed String.memset(ptr, value, n) helper from corlib,
 *   cached in a static.  Aborts if corlib lacks it.
 *   NOTE(review): same non-thread-safe lazy init as get_memcpy_method.
 */
3396 get_memset_method (void)
3398 static MonoMethod *memset_method = NULL;
3399 if (!memset_method) {
3400 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3402 g_error ("Old corlib found. Install a new one");
3404 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit IR that zero-initializes a valuetype of type KLASS at the address in
 *   DEST.  gsharedvt types use the runtime-provided size plus an indirect
 *   call to the bzero helper; small sizes are inlined via mini_emit_memset;
 *   everything else calls the managed memset helper with value 0.
 */
3408 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3410 MonoInst *iargs [3];
3413 MonoMethod *memset_method;
3414 MonoInst *size_ins = NULL;
3415 MonoInst *bzero_ins = NULL;
3416 static MonoMethod *bzero_method;
3418 /* FIXME: Optimize this for the case when dest is an LDADDR */
3419 mono_class_init (klass);
3420 if (mini_is_gsharedvt_klass (klass)) {
3421 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3422 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3424 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3425 g_assert (bzero_method);
3427 iargs [1] = size_ins;
3428 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3432 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3434 n = mono_class_value_size (klass, &align);
/* Small enough to zero inline. */
3436 if (n <= sizeof (gpointer) * 8) {
3437 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3440 memset_method = get_memset_method ();
3442 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3443 EMIT_NEW_ICONST (cfg, iargs [2], n);
3444 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3451 * Emit IR to return either the this pointer for instance method,
3452 * or the mrgctx for static methods.
/*
 * The rgctx source depends on METHOD and CONTEXT_USED:
 *   - instance method on a reference type: load 'this' (arg 0);
 *   - generic method (method context used): load the mrgctx variable;
 *   - static/valuetype method: load the vtable variable, dereferencing the
 *     mrgctx's class_vtable when the method carries a method instantiation;
 *   - otherwise: load the vtable out of 'this'.
 */
3455 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3457 MonoInst *this_ins = NULL;
3459 g_assert (cfg->gshared);
3461 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3462 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3463 !method->klass->valuetype)
3464 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3466 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3467 MonoInst *mrgctx_loc, *mrgctx_var;
3469 g_assert (!this_ins);
3470 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3472 mrgctx_loc = mono_get_vtable_var (cfg);
3473 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3476 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3477 MonoInst *vtable_loc, *vtable_var;
3479 g_assert (!this_ins);
3481 vtable_loc = mono_get_vtable_var (cfg);
3482 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3484 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; get its class vtable. */
3485 MonoInst *mrgctx_var = vtable_var;
3488 vtable_reg = alloc_preg (cfg);
3489 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3490 vtable_var->type = STACK_PTR;
/* Instance method: the vtable lives in the object header of 'this'. */
3498 vtable_reg = alloc_preg (cfg);
3499 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) an rgctx-entry descriptor for METHOD: the
 *   embedded MonoJumpInfo identifies the looked-up item (PATCH_TYPE /
 *   PATCH_DATA), INFO_TYPE the kind of information wanted, IN_MRGCTX whether
 *   the lookup goes through a method rgctx rather than a vtable.
 */
3504 static MonoJumpInfoRgctxEntry *
3505 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3507 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3508 res->method = method;
3509 res->in_mrgctx = in_mrgctx;
3510 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3511 res->data->type = patch_type;
3512 res->data->data.target = patch_data;
3513 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit inline IR that fetches ENTRY from the rgctx in RGCTX.  First branch
 *   (presumably llvmonly — the elided condition is not visible): no inline
 *   fastpath, just call the mono_fill_{method,class}_rgctx icall with the
 *   slot index resolved through an RGCTX_SLOT_INDEX constant.  Second branch:
 *   emit the full inline fastpath — walk the rgctx array chain for the
 *   statically-known slot, falling back to the same icalls when any pointer
 *   or the slot itself is still NULL.
 */
3518 static inline MonoInst*
3519 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3521 MonoInst *args [16];
3524 // FIXME: No fastpath since the slot is not a compile time constant
3526 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3527 if (entry->in_mrgctx)
3528 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3530 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3534 * FIXME: This can be called during decompose, which is a problem since it creates
3536 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3538 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3540 MonoBasicBlock *is_null_bb, *end_bb;
3541 MonoInst *res, *ins, *call;
3544 slot = mini_get_rgctx_entry_slot (entry);
3546 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3547 index = MONO_RGCTX_SLOT_INDEX (slot);
/* The mrgctx header occupies the first slots of the first array. */
3549 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find which chained array of geometrically growing size holds INDEX. */
3550 for (depth = 0; ; ++depth) {
3551 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3553 if (index < size - 1)
3558 NEW_BBLOCK (cfg, end_bb);
3559 NEW_BBLOCK (cfg, is_null_bb);
3562 rgctx_reg = rgctx->dreg;
3564 rgctx_reg = alloc_preg (cfg);
3566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3567 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3568 NEW_BBLOCK (cfg, is_null_bb);
3570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3571 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3574 for (i = 0; i < depth; ++i) {
3575 int array_reg = alloc_preg (cfg);
3577 /* load ptr to next array */
3578 if (mrgctx && i == 0)
3579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3582 rgctx_reg = array_reg;
3583 /* is the ptr null? */
3584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3585 /* if yes, jump to actual trampoline */
3586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot (+1 skips the "next array" pointer at offset 0). */
3590 val_reg = alloc_preg (cfg);
3591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3592 /* is the slot null? */
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3594 /* if yes, jump to actual trampoline */
3595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath result. */
3598 res_reg = alloc_preg (cfg);
3599 MONO_INST_NEW (cfg, ins, OP_MOVE);
3600 ins->dreg = res_reg;
3601 ins->sreg1 = val_reg;
3602 MONO_ADD_INS (cfg->cbb, ins);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: ask the runtime to fill the slot. */
3607 MONO_START_BB (cfg, is_null_bb);
3609 EMIT_NEW_ICONST (cfg, args [1], index);
3611 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3613 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3614 MONO_INST_NEW (cfg, ins, OP_MOVE);
3615 ins->dreg = res_reg;
3616 ins->sreg1 = call->dreg;
3617 MONO_ADD_INS (cfg->cbb, ins);
3618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3620 MONO_START_BB (cfg, end_bb);
/*
 * emit_rgctx_fetch:
 *
 * Dispatch helper: fetch the value of rgctx entry ENTRY either by inlining
 * the lookup IR (emit_rgctx_fetch_inline) or by emitting a call to the
 * out-of-line lazy fetch trampoline (MONO_PATCH_INFO_RGCTX_FETCH).
 * NOTE(review): the selecting condition between the two returns is elided in
 * this view — presumably cfg->llvm_only picks the inline path; confirm upstream.
 */
3629 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3632 static inline MonoInst*
3633 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3636 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3638 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the rgctx entry of type RGCTX_TYPE describing KLASS:
 * build a MONO_PATCH_INFO_CLASS rgctx entry, load the rgctx for the current
 * method, and fetch the slot via emit_rgctx_fetch.
 */
3642 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3643 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3645 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3646 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3648 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to load the rgctx entry of type RGCTX_TYPE for the method
 * signature SIG (MONO_PATCH_INFO_SIGNATURE), fetched through emit_rgctx_fetch.
 */
3652 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3653 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3655 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3656 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3658 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to load the rgctx entry describing a gsharedvt call with
 * signature SIG to CMETHOD. The (sig, method) pair is mempool-allocated as a
 * MonoJumpInfoGSharedVtCall and registered as a MONO_PATCH_INFO_GSHAREDVT_CALL
 * entry, then fetched via emit_rgctx_fetch.
 */
3662 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3663 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3665 MonoJumpInfoGSharedVtCall *call_info;
3666 MonoJumpInfoRgctxEntry *entry;
3669 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3670 call_info->sig = sig;
3671 call_info->method = cmethod;
3673 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3674 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3676 return emit_rgctx_fetch (cfg, rgctx, entry);
3680 * emit_get_rgctx_virt_method:
3682 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * The (klass, virt_method) pair is mempool-allocated as a MonoJumpInfoVirtMethod
 * and registered as a MONO_PATCH_INFO_VIRT_METHOD rgctx entry, then fetched
 * through emit_rgctx_fetch.
 */
3685 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3686 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3688 MonoJumpInfoVirtMethod *info;
3689 MonoJumpInfoRgctxEntry *entry;
3692 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3693 info->klass = klass;
3694 info->method = virt_method;
3696 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3697 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3699 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to load the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for
 * CMETHOD. INFO is registered as a MONO_PATCH_INFO_GSHAREDVT_METHOD patch
 * and the slot is fetched via emit_rgctx_fetch.
 */
3703 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3704 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3706 MonoJumpInfoRgctxEntry *entry;
3709 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3710 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3712 return emit_rgctx_fetch (cfg, rgctx, entry);
3716 * emit_get_rgctx_method:
3718 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3719 * normal constants, else emit a load from the rgctx.
3722 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3723 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3725 if (!context_used) {
/* Non-shared code: the value is known at compile time, emit a constant. */
3728 switch (rgctx_type) {
3729 case MONO_RGCTX_INFO_METHOD:
3730 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3732 case MONO_RGCTX_INFO_METHOD_RGCTX:
3733 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not expected on the constant path. */
3736 g_assert_not_reached ();
/* Shared code: go through the rgctx with a METHODCONST patch entry. */
3739 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3740 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3742 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the rgctx entry of type RGCTX_TYPE for FIELD
 * (MONO_PATCH_INFO_FIELD), fetched through emit_rgctx_fetch.
 */
3747 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3748 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3750 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3751 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3753 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the index of the entry in cfg->gsharedvt_info->entries describing
 * (DATA, RGCTX_TYPE). An existing matching entry is reused, except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET which always gets a fresh slot. The entries
 * array grows by doubling (starting at 16) from the cfg mempool; the old
 * array is intentionally leaked into the mempool.
 */
3757 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3759 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3760 MonoRuntimeGenericContextInfoTemplate *template_;
/* Try to reuse an existing slot with the same (type, data). */
3765 for (i = 0; i < info->num_entries; ++i) {
3766 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3768 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array when full (double, min 16). */
3772 if (info->num_entries == info->count_entries) {
3773 MonoRuntimeGenericContextInfoTemplate *new_entries;
3774 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3776 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3778 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3779 info->entries = new_entries;
3780 info->count_entries = new_count_entries;
/* Append the new template at the end. */
3783 idx = info->num_entries;
3784 template_ = &info->entries [idx];
3785 template_->info_type = rgctx_type;
3786 template_->data = data;
3788 info->num_entries ++;
3794 * emit_get_gsharedvt_info:
3796 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3799 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) a slot for (data, rgctx_type) in the per-method info. */
3804 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3805 /* Load info->entries [idx] */
3806 dreg = alloc_preg (cfg);
3807 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: load the gsharedvt info entry of type RGCTX_TYPE for
 * KLASS (keyed on its byval MonoType).
 */
3813 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3815 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3819 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 * Emit IR which runs the static constructor / class initialization for
 * KLASS if it has not run yet. The vtable argument comes either from the
 * rgctx (shared code) or as a compile-time vtable constant.
 */
3822 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3824 MonoInst *vtable_arg;
3827 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx. */
3830 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3831 klass, MONO_RGCTX_INFO_VTABLE);
3833 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3837 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3840 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3844 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3845 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3847 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3848 ins->sreg1 = vtable_arg->dreg;
3849 MONO_ADD_INS (cfg->cbb, ins);
/*
 * Fallback: inline fastpath that tests the 'initialized' bit in the
 * vtable and only calls the mono_generic_class_init icall when clear.
 */
3851 static int byte_offset = -1;
3852 static guint8 bitmask;
3853 int bits_reg, inited_reg;
3854 MonoBasicBlock *inited_bb;
3855 MonoInst *args [16];
/* Resolve the bitfield position lazily, once per process. */
3857 if (byte_offset < 0)
3858 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3860 bits_reg = alloc_ireg (cfg);
3861 inited_reg = alloc_ireg (cfg);
3863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3866 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall. */
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3871 args [0] = vtable_arg;
3872 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3874 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 * Emit a sequence point at IL offset IP - cfg->header->code, but only when
 * sequence points are enabled and METHOD is the method being compiled (not
 * an inlined callee). NONEMPTY_STACK marks the point as having values on
 * the evaluation stack.
 */
3879 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3883 if (cfg->gen_seq_points && cfg->method == method) {
3884 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3886 ins->flags |= MONO_INST_NONEMPTY_STACK;
3887 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is active, emit IR that records the source class
 * (from OBJ_REG's vtable) and the target KLASS into the thread's
 * MonoJitTlsData (class_cast_from / class_cast_to), so a failing cast can
 * report both types. With NULL_CHECK, skip the bookkeeping for null objects.
 * No-op unless better_cast_details is enabled.
 */
3892 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3894 if (mini_get_debug_options ()->better_cast_details) {
3895 int vtable_reg = alloc_preg (cfg);
3896 int klass_reg = alloc_preg (cfg);
3897 MonoBasicBlock *is_null_bb = NULL;
3899 int to_klass_reg, context_used;
/* Optional null bypass: null objects carry no source class. */
3902 NEW_BBLOCK (cfg, is_null_bb);
3904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3908 tls_get = mono_get_jit_tls_intrinsic (cfg);
3910 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3914 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to = KLASS, via rgctx in shared code, constant otherwise. */
3920 context_used = mini_class_check_context_used (cfg, klass);
3922 MonoInst *class_ins;
3924 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3925 to_klass_reg = class_ins->dreg;
3927 to_klass_reg = alloc_preg (cfg);
3928 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3933 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 * Clear the per-thread cast bookkeeping written by save_cast_details once
 * the cast succeeded. No-op unless better_cast_details is enabled.
 */
3938 reset_cast_details (MonoCompile *cfg)
3940 /* Reset the variables holding the cast details */
3941 if (mini_get_debug_options ()->better_cast_details) {
3942 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3944 MONO_ADD_INS (cfg->cbb, tls_get);
3945 /* It is enough to reset the from field */
3946 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3951 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit IR which throws ArrayTypeMismatchException unless OBJ is an array
 * of exactly ARRAY_CLASS. The comparison strategy depends on the
 * compilation mode: class pointer via a runtime constant (MONO_OPT_SHARED),
 * vtable via the rgctx (shared generic code), vtable constant (AOT), or a
 * direct vtable-pointer immediate (plain JIT).
 */
3954 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3956 int vtable_reg = alloc_preg (cfg);
3959 context_used = mini_class_check_context_used (cfg, array_class);
3961 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3963 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3965 if (cfg->opt & MONO_OPT_SHARED) {
3966 int class_reg = alloc_preg (cfg);
3969 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3970 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3971 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3972 } else if (context_used) {
3973 MonoInst *vtable_ins;
3975 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3976 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3978 if (cfg->compile_aot) {
/* NOTE(review): on vtable-creation failure control flow is elided here;
 * presumably the caller detects the load error (see comment above). */
3982 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3984 vt_reg = alloc_preg (cfg);
3985 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3986 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3995 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3997 reset_cast_details (cfg);
4001 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4002 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 * Emit a call to Nullable<T>.Unbox on VAL. In shared generic code the
 * method address is loaded from the rgctx (indirect call); otherwise a
 * direct call is emitted, passing the vtable when required by
 * check_method_sharing.
 */
4005 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4007 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4010 MonoInst *rgctx, *addr;
4012 /* FIXME: What if the class is shared? We might not
4013 have to get the address of the method from the
4015 addr = emit_get_rgctx_method (cfg, context_used, method,
4016 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4017 if (cfg->llvm_only && cfg->gsharedvt) {
4018 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4020 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4022 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable. */
4025 gboolean pass_vtable, pass_mrgctx;
4026 MonoInst *rgctx_arg = NULL;
4028 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4029 g_assert (!pass_mrgctx);
4032 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4035 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4038 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 * Emit IR to unbox the object in SP [0] to value type KLASS: verify the
 * object's rank is 0 and its element class matches KLASS (throwing
 * InvalidCastException otherwise), then return the address of the payload
 * (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
4043 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4047 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4048 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4049 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4050 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4052 obj_reg = sp [0]->dreg;
/* Faulting vtable load doubles as the null check. */
4053 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4054 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4056 /* FIXME: generics */
4057 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed to a value type. */
4060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4061 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx. */
4067 MonoInst *element_class;
4069 /* This assertion is from the unboxcast insn */
4070 g_assert (klass->rank == 0);
4072 element_class = emit_get_rgctx_klass (cfg, context_used,
4073 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4075 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4076 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast diagnostics. */
4078 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4079 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4080 reset_cast_details (cfg);
/* Result: pointer to the unboxed payload right after the object header. */
4083 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4084 MONO_ADD_INS (cfg->cbb, add);
4085 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 * Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete
 * representation is only known at runtime. Branches on the runtime
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE: vtype (payload address), reference
 * (store the ref into a temporary and take its address), or Nullable
 * (call the class' unbox routine through an rgctx-provided address).
 * ADDR_REG holds the resulting address in all cases; the final load
 * produces the unboxed value.
 */
4092 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4094 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4095 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4099 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* castclass-and-unbox helper validates the object's dynamic type. */
4105 args [1] = klass_inst;
4108 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4110 NEW_BBLOCK (cfg, is_ref_bb);
4111 NEW_BBLOCK (cfg, is_nullable_bb);
4112 NEW_BBLOCK (cfg, end_bb);
4113 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4120 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4121 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: address of the payload after the object header. */
4125 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4126 MONO_ADD_INS (cfg->cbb, addr);
4128 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4131 MONO_START_BB (cfg, is_ref_bb);
4133 /* Save the ref to a temporary */
4134 dreg = alloc_ireg (cfg);
4135 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4136 addr->dreg = addr_reg;
4137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4141 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable unbox via an rgctx-supplied address, with a
 * hand-built gsharedvt-compatible signature. */
4144 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4145 MonoInst *unbox_call;
4146 MonoMethodSignature *unbox_sig;
4148 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4149 unbox_sig->ret = &klass->byval_arg;
4150 unbox_sig->param_count = 1;
4151 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4154 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4156 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4158 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4159 addr->dreg = addr_reg;
4162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4165 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value through the address computed on every path. */
4168 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4174 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit IR to allocate an object of class KLASS, choosing between several
 * strategies: shared generic code (vtable/klass via rgctx + icall or
 * managed allocator), MONO_OPT_SHARED (domain + klass icall), an AOT
 * corlib fastpath (mono_helper_newobj_mscorlib), a managed allocator, or
 * the allocation function selected by mono_class_get_allocation_ftn.
 * FOR_BOX indicates the allocation is for boxing a value type.
 */
4177 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4179 MonoInst *iargs [2];
/* Shared generic code path. */
4184 MonoRgctxInfoType rgctx_info;
4185 MonoInst *iargs [2];
4186 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4188 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4190 if (cfg->opt & MONO_OPT_SHARED)
4191 rgctx_info = MONO_RGCTX_INFO_KLASS;
4193 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4194 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4196 if (cfg->opt & MONO_OPT_SHARED) {
4197 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4199 alloc_ftn = ves_icall_object_new;
4202 alloc_ftn = ves_icall_object_new_specific;
/* Prefer the managed (inline GC) allocator when available. */
4205 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4206 if (known_instance_size) {
4207 int size = mono_class_instance_size (klass);
4208 if (size < sizeof (MonoObject))
4209 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4211 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4213 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4216 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths. */
4219 if (cfg->opt & MONO_OPT_SHARED) {
4220 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4221 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4223 alloc_ftn = ves_icall_object_new;
4224 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4225 /* This happens often in argument checking code, eg. throw new FooException... */
4226 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4227 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4228 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4230 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4231 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
4235 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4236 cfg->exception_ptr = klass;
4240 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4242 if (managed_alloc) {
4243 int size = mono_class_instance_size (klass);
4244 if (size < sizeof (MonoObject))
4245 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4247 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4248 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4249 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4251 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation fns take the instance size in gpointer-words first. */
4253 guint32 lw = vtable->klass->instance_size;
4254 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4255 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4256 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4259 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4263 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4267 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit IR to box VAL of value type KLASS and return the resulting object.
 * Special cases: Nullable<T> goes through Nullable<T>.Box (direct call or
 * rgctx-addressed indirect call in shared code); gsharedvt types branch at
 * runtime on MONO_RGCTX_INFO_CLASS_BOX_TYPE (vtype -> allocate + copy,
 * ref -> value is already an object reference, Nullable -> call the
 * rgctx-supplied box routine). The plain case allocates via handle_alloc
 * and stores VAL after the object header.
 */
4270 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4272 MonoInst *alloc, *ins;
4274 if (mono_class_is_nullable (klass)) {
4275 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4278 if (cfg->llvm_only && cfg->gsharedvt) {
4279 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4280 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4281 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4283 /* FIXME: What if the class is shared? We might not
4284 have to get the method address from the RGCTX. */
4285 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4286 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4287 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4289 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call to Box, maybe passing the vtable. */
4292 gboolean pass_vtable, pass_mrgctx;
4293 MonoInst *rgctx_arg = NULL;
4295 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4296 g_assert (!pass_mrgctx);
4299 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4302 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4305 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4309 if (mini_is_gsharedvt_klass (klass)) {
4310 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4311 MonoInst *res, *is_ref, *src_var, *addr;
4314 dreg = alloc_ireg (cfg);
4316 NEW_BBLOCK (cfg, is_ref_bb);
4317 NEW_BBLOCK (cfg, is_nullable_bb);
4318 NEW_BBLOCK (cfg, end_bb);
4319 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the object header. */
4327 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4330 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4331 ins->opcode = OP_STOREV_MEMBASE;
4333 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4334 res->type = STACK_OBJ;
4336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4339 MONO_START_BB (cfg, is_ref_bb);
4341 /* val is a vtype, so has to load the value manually */
4342 src_var = get_vreg_to_inst (cfg, val->dreg);
4344 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4345 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4350 MONO_START_BB (cfg, is_nullable_bb);
4353 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4354 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4356 MonoMethodSignature *box_sig;
4359 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4360 * construct that method at JIT time, so have to do things by hand.
4362 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4363 box_sig->ret = &mono_defaults.object_class->byval_arg;
4364 box_sig->param_count = 1;
4365 box_sig->params [0] = &klass->byval_arg;
4368 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4370 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4371 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4372 res->type = STACK_OBJ;
4376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4378 MONO_START_BB (cfg, end_bb);
/* Plain (non-Nullable, non-gsharedvt) box. */
4382 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4386 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS is a generic instance (or, in shared code, a
 * generic definition) with at least one variant/covariant type parameter
 * instantiated with a reference type. Such classes need the general
 * cache-based isinst/castclass machinery.
 */
4392 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4395 MonoGenericContainer *container;
4396 MonoGenericInst *ginst;
4398 if (klass->generic_class) {
4399 container = klass->generic_class->container_class->generic_container;
4400 ginst = klass->generic_class->context.class_inst;
4401 } else if (klass->generic_container && context_used) {
4402 container = klass->generic_container;
4403 ginst = container->context.class_inst;
4408 for (i = 0; i < container->type_argc; ++i) {
/* Only variance-annotated parameters matter. */
4410 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4412 type = ginst->type_argv [i];
4413 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls never raise. */
4419 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 * Return whether CMETHOD's icall can be called directly (without going
 * through a wrapper). Restricted to a whitelist because a directly-called
 * icall must never (transitively) call mono_raise_exception ().
 * The hash is published with a memory barrier; lookups are lock-free since
 * the table is never mutated after publication.
 */
4422 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4424 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4425 if (!direct_icalls_enabled (cfg))
4429 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4430 * Whitelist a few icalls for now.
4432 if (!direct_icall_type_hash) {
4433 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4435 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4436 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4437 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4438 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4439 mono_memory_barrier ();
4440 direct_icall_type_hash = h;
4443 if (cmethod->klass == mono_defaults.math_class)
4445 /* No locking needed */
4446 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * is_complex_isinst:
 * TRUE when KLASS cannot be checked with a simple class-pointer compare
 * (interfaces, arrays, Nullable, MarshalByRef, sealed classes, and open
 * type variables) and the cache-based cast machinery must be used.
 * NOTE: function-like macro — 'klass' is evaluated multiple times; only
 * pass side-effect-free expressions.
 */
4451 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 * Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * wrapped in save/reset_cast_details so a failure reports both types.
 * ARGS are the wrapper's arguments (obj, klass, cache slot).
 */
4454 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4456 MonoMethod *mono_castclass;
4459 mono_castclass = mono_marshal_get_castclass_with_cache ();
4461 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4462 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4463 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 * Allocate a fresh per-call-site index for a CASTCLASS_CACHE patch,
 * combining the method index (high 16 bits) with a per-cfg counter.
 */
4469 get_castclass_cache_idx (MonoCompile *cfg)
4471 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4472 cfg->castclass_cache_index ++;
4473 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 * Non-shared-code front end for emit_castclass_with_cache: KLASS is a
 * compile-time constant and the cache slot is a runtime constant keyed by
 * a fresh per-call-site index.
 */
4477 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4486 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4489 idx = get_castclass_cache_idx (cfg);
4490 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4492 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4493 return emit_castclass_with_cache (cfg, klass, args);
4497 * Returns NULL and set the cfg exception on error.
4500 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4502 MonoBasicBlock *is_null_bb;
4503 int obj_reg = src->dreg;
4504 int vtable_reg = alloc_preg (cfg);
4506 MonoInst *klass_inst = NULL, *res;
4508 context_used = mini_class_check_context_used (cfg, klass);
4510 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4511 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4512 (*inline_costs) += 2;
4514 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4515 MonoMethod *mono_castclass;
4516 MonoInst *iargs [1];
4519 mono_castclass = mono_marshal_get_castclass (klass);
4522 save_cast_details (cfg, klass, src->dreg, TRUE);
4523 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4524 iargs, ip, cfg->real_offset, TRUE);
4525 reset_cast_details (cfg);
4526 CHECK_CFG_EXCEPTION;
4527 g_assert (costs > 0);
4529 cfg->real_offset += 5;
4531 (*inline_costs) += costs;
4539 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4540 MonoInst *cache_ins;
4542 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4547 /* klass - it's the second element of the cache entry*/
4548 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4551 args [2] = cache_ins;
4553 return emit_castclass_with_cache (cfg, klass, args);
4556 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4559 NEW_BBLOCK (cfg, is_null_bb);
4561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4564 save_cast_details (cfg, klass, obj_reg, FALSE);
4566 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4568 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4570 int klass_reg = alloc_preg (cfg);
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4574 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4575 /* the remoting code is broken, access the class for now */
4576 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4577 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4579 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4580 cfg->exception_ptr = klass;
4583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4588 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4591 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4595 MONO_START_BB (cfg, is_null_bb);
4597 reset_cast_details (cfg);
4606 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' test of SRC against KLASS.  The result
 * register is pre-loaded with the object reference; on a failed test control
 * falls through to false_bb, which overwrites the result with 0.  A null
 * input trivially "succeeds" and yields null.
 */
4609 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4612 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4613 int obj_reg = src->dreg;
4614 int vtable_reg = alloc_preg (cfg);
4615 int res_reg = alloc_ireg_ref (cfg);
4616 MonoInst *klass_inst = NULL;
/* Variant reference generic arguments and other "complex" tests are handed
 * off to the managed isinst-with-cache wrapper instead of inline checks. */
4621 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4622 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4623 MonoInst *cache_ins;
4625 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4630 /* klass - it's the second element of the cache entry*/
4631 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4634 args [2] = cache_ins;
4636 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/* In shared code, the class to test against is fetched through the rgctx. */
4639 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4642 NEW_BBLOCK (cfg, is_null_bb);
4643 NEW_BBLOCK (cfg, false_bb);
4644 NEW_BBLOCK (cfg, end_bb);
4646 /* Do the assignment at the beginning, so the other assignment can be if converted */
4647 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4648 ins->type = STACK_OBJ;
/* Null check: a null reference is always a valid result of isinst. */
4651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4652 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4656 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4657 g_assert (!context_used);
4658 /* the is_null_bb target simply copies the input register to the output */
4659 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4661 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks first, then check the element class. */
4664 int rank_reg = alloc_preg (cfg);
4665 int eclass_reg = alloc_preg (cfg);
4667 g_assert (!context_used);
4668 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case the element classes that have non-trivial assignability
 * rules (object, System.Enum's parent, System.Enum, interfaces). */
4673 if (klass->cast_class == mono_defaults.object_class) {
4674 int parent_reg = alloc_preg (cfg);
4675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4676 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4677 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4679 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4680 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4681 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4683 } else if (klass->cast_class == mono_defaults.enum_class) {
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4686 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4687 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4689 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4690 /* Check that the object is a vector too */
4691 int bounds_reg = alloc_preg (cfg);
4692 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4697 /* the is_null_bb target simply copies the input register to the output */
4698 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4700 } else if (mono_class_is_nullable (klass)) {
4701 g_assert (!context_used);
4702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4703 /* the is_null_bb target simply copies the input register to the output */
4704 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-shared classes: a single vtable/class pointer comparison
 * suffices (no subclass can exist). */
4706 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4707 g_assert (!context_used);
4708 /* the remoting code is broken, access the class for now */
4709 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4710 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4712 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4713 cfg->exception_ptr = klass;
4716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4718 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4721 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4725 /* the is_null_bb target simply copies the input register to the output */
4726 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: test failed, the result becomes 0 (null). */
4731 MONO_START_BB (cfg, false_bb);
4733 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4736 MONO_START_BB (cfg, is_null_bb);
4738 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware variant of 'isinst'.  Unlike
 * handle_isinst (), the result is an integer code (0/1/2, documented in the
 * comment below) so the caller can distinguish the transparent-proxy case.
 * The proxy paths exist only when remoting is compiled in (DISABLE_REMOTING
 * not defined); otherwise hitting them aborts via g_error ().
 */
4744 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4746 /* This opcode takes as input an object reference and a class, and returns:
4747 0) if the object is an instance of the class,
4748 1) if the object is not instance of the class,
4749 2) if the object is a proxy whose type cannot be determined */
4752 #ifndef DISABLE_REMOTING
4753 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4755 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4757 int obj_reg = src->dreg;
4758 int dreg = alloc_ireg (cfg);
4760 #ifndef DISABLE_REMOTING
4761 int klass_reg = alloc_preg (cfg);
4764 NEW_BBLOCK (cfg, true_bb);
4765 NEW_BBLOCK (cfg, false_bb);
4766 NEW_BBLOCK (cfg, end_bb);
4767 #ifndef DISABLE_REMOTING
4768 NEW_BBLOCK (cfg, false2_bb);
4769 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is reported as "not an instance" (code 1). */
4772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4773 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4775 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4776 #ifndef DISABLE_REMOTING
4777 NEW_BBLOCK (cfg, interface_fail_bb);
4780 tmp_reg = alloc_preg (cfg);
4781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4782 #ifndef DISABLE_REMOTING
/* If the interface check fails, the object may still be a transparent
 * proxy whose real type implements it; re-test via custom_type_info. */
4783 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4784 MONO_START_BB (cfg, interface_fail_bb);
4785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4787 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4789 tmp_reg = alloc_preg (cfg);
4790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4792 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4794 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4797 #ifndef DISABLE_REMOTING
/* Non-interface case: detect a transparent proxy and test its
 * remote_class->proxy_class instead of the vtable class. */
4798 tmp_reg = alloc_preg (cfg);
4799 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4802 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4803 tmp_reg = alloc_preg (cfg);
4804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4807 tmp_reg = alloc_preg (cfg);
4808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4809 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4812 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4815 MONO_START_BB (cfg, no_proxy_bb);
4817 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4819 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Join blocks: load the result code and fall through to end_bb. */
4823 MONO_START_BB (cfg, false_bb);
4825 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4826 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4828 #ifndef DISABLE_REMOTING
4829 MONO_START_BB (cfg, false2_bb);
4831 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4835 MONO_START_BB (cfg, true_bb);
4837 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4839 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 value for the evaluation stack. */
4842 MONO_INST_NEW (cfg, ins, OP_ICONST);
4844 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware variant of 'castclass'.  Returns an
 * integer code (see the comment below); a failed cast raises
 * InvalidCastException via MONO_EMIT_NEW_COND_EXC / mini_emit_castclass.
 * Proxy handling is only present when DISABLE_REMOTING is not defined.
 */
4850 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4852 /* This opcode takes as input an object reference and a class, and returns:
4853 0) if the object is an instance of the class,
4854 1) if the object is a proxy whose type cannot be determined
4855 an InvalidCastException exception is thrown otherwise*/
4858 #ifndef DISABLE_REMOTING
4859 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4861 MonoBasicBlock *ok_result_bb;
4863 int obj_reg = src->dreg;
4864 int dreg = alloc_ireg (cfg);
4865 int tmp_reg = alloc_preg (cfg);
4867 #ifndef DISABLE_REMOTING
4868 int klass_reg = alloc_preg (cfg);
4869 NEW_BBLOCK (cfg, end_bb);
4872 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
4874 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a descriptive exception. */
4877 save_cast_details (cfg, klass, obj_reg, FALSE);
4879 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4880 #ifndef DISABLE_REMOTING
4881 NEW_BBLOCK (cfg, interface_fail_bb);
4883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* On interface-check failure, only a transparent proxy with unknown
 * custom_type_info escapes the InvalidCastException (result code 1). */
4884 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4885 MONO_START_BB (cfg, interface_fail_bb);
4886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4888 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4890 tmp_reg = alloc_preg (cfg);
4891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4893 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4895 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4896 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4899 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4903 #ifndef DISABLE_REMOTING
4904 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: transparent proxies are cast against their
 * remote_class->proxy_class; everything else takes the normal path. */
4906 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4907 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4908 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4910 tmp_reg = alloc_preg (cfg);
4911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4914 tmp_reg = alloc_preg (cfg);
4915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4919 NEW_BBLOCK (cfg, fail_1_bb);
4921 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4923 MONO_START_BB (cfg, fail_1_bb);
4925 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4926 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4928 MONO_START_BB (cfg, no_proxy_bb);
4930 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4932 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4936 MONO_START_BB (cfg, ok_result_bb);
4938 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4940 #ifndef DISABLE_REMOTING
4941 MONO_START_BB (cfg, end_bb);
/* Materialize the result code as an I4 for the evaluation stack. */
4945 MONO_INST_NEW (cfg, ins, OP_ICONST);
4947 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Inline the body of Enum.HasFlag: load the enum's underlying value from
 * ENUM_THIS, AND it with ENUM_FLAG, then compare the masked value with the
 * flag itself -- (value & flag) == flag.  The I4/I8 opcode variants are
 * selected from the underlying type's size; non-LLVM paths decompose the
 * composite opcodes afterwards.
 */
4952 static G_GNUC_UNUSED MonoInst*
4953 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4955 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4956 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4959 switch (enum_type->type) {
4962 #if SIZEOF_REGISTER == 8
4974 MonoInst *load, *and_, *cmp, *ceq;
4975 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4976 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4977 int dest_reg = alloc_ireg (cfg);
4979 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4980 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4981 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4982 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4984 ceq->type = STACK_I4;
/* Lower the compound compare/set-cc opcodes for backends that need it. */
4987 load = mono_decompose_opcode (cfg, load);
4988 and_ = mono_decompose_opcode (cfg, and_);
4989 cmp = mono_decompose_opcode (cfg, cmp);
4990 ceq = mono_decompose_opcode (cfg, ceq);
4998 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of type KLASS and initializes it for
 * METHOD with the given TARGET, inlining the work normally done by
 * mono_delegate_ctor ().  VIRTUAL_ selects the virtual-delegate flavor.
 * CONTEXT_USED != 0 means we are in shared (generic) code and method/klass
 * values must come through the rgctx.
 */
5000 static G_GNUC_UNUSED MonoInst*
5001 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5005 gpointer trampoline;
5006 MonoInst *obj, *method_ins, *tramp_ins;
5010 if (virtual_ && !cfg->llvm_only) {
5011 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5014 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5018 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5022 /* Inline the contents of mono_delegate_ctor */
5024 /* Set target field */
5025 /* Optimize away setting of NULL target */
5026 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5027 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store writes a reference into a heap object, so a write
 * barrier is required when the GC needs them. */
5028 if (cfg->gen_write_barriers) {
5029 dreg = alloc_preg (cfg);
5030 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5031 emit_write_barrier (cfg, ptr, target);
5035 /* Set method field */
5036 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5037 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5040 * To avoid looking up the compiled code belonging to the target method
5041 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5042 * store it, and we fill it after the method has been compiled.
5044 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5045 MonoInst *code_slot_ins;
5048 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain method -> code-slot hash under
 * the domain lock. */
5050 domain = mono_domain_get ();
5051 mono_domain_lock (domain);
5052 if (!domain_jit_info (domain)->method_code_hash)
5053 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5054 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5056 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5057 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5059 mono_domain_unlock (domain);
5061 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5063 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode initializes the delegate through a JIT icall instead of
 * a trampoline. */
5066 if (cfg->llvm_only) {
5067 MonoInst *args [16];
5072 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5073 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5076 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT stores a patchable delegate-trampoline constant; JIT creates the
 * trampoline (or trampoline info) eagerly. */
5082 if (cfg->compile_aot) {
5083 MonoDelegateClassMethodPair *del_tramp;
5085 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5086 del_tramp->klass = klass;
5087 del_tramp->method = context_used ? NULL : method;
5088 del_tramp->is_virtual = virtual_;
5089 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5092 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5094 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5095 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5098 /* Set invoke_impl field */
5100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5102 dreg = alloc_preg (cfg);
5103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5106 dreg = alloc_preg (cfg);
5107 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate on the object itself. */
5111 dreg = alloc_preg (cfg);
5112 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5113 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5115 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the IR for creating a multi-dimensional array of the given RANK:
 * a native call to the (vararg) mono_array_new_va icall wrapper, with the
 * dimension arguments taken from SP.  The vararg calling convention is why
 * LLVM compilation is disabled for this method.
 */
5121 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5123 MonoJitICallInfo *info;
5125 /* Need to register the icall so it gets an icall wrapper */
5126 info = mono_get_array_new_va_icall (rank);
5128 cfg->flags |= MONO_CFG_HAS_VARARGS;
5130 /* mono_array_new_va () needs a vararg calling convention */
5131 cfg->exception_message = g_strdup ("array-new");
5132 cfg->disable_llvm = TRUE;
5134 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5135 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5139 * handle_constrained_gsharedvt_call:
5141 * Handle constrained calls where the receiver is a gsharedvt type.
5142 * Return the instruction representing the call. Set the cfg exception on failure.
5145 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5146 gboolean *ref_emit_widen)
5148 MonoInst *ins = NULL;
5149 gboolean emit_widen = *ref_emit_widen;
5152 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5153 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5154 * pack the arguments into an array, and do the rest of the work in in an icall.
5156 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5157 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5158 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5159 MonoInst *args [16];
5162 * This case handles calls to
5163 * - object:ToString()/Equals()/GetHashCode(),
5164 * - System.IComparable<T>:CompareTo()
5165 * - System.IEquatable<T>:Equals ()
5166 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method; use the rgctx when it is context-dependent. */
5170 if (mono_method_check_context_used (cmethod))
5171 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5173 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5174 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5176 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5177 if (fsig->hasthis && fsig->param_count) {
5178 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5179 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5180 ins->dreg = alloc_preg (cfg);
5181 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5182 MONO_ADD_INS (cfg->cbb, ins);
5185 if (mini_is_gsharedvt_type (fsig->params [0])) {
5186 int addr_reg, deref_arg_reg;
/* For a gsharedvt argument, the icall also needs to know whether to
 * dereference it; compute that from the BOX_TYPE rgctx info. */
5188 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5189 deref_arg_reg = alloc_preg (cfg);
5190 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5191 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5193 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5194 addr_reg = ins->dreg;
5195 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5197 EMIT_NEW_ICONST (cfg, args [3], 0);
5198 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5201 EMIT_NEW_ICONST (cfg, args [3], 0);
5202 EMIT_NEW_ICONST (cfg, args [4], 0);
5204 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it according to the return type. */
5207 if (mini_is_gsharedvt_type (fsig->ret)) {
5208 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5209 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Load the value from just past the MonoObject header of the box. */
5213 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5214 MONO_ADD_INS (cfg->cbb, add);
5216 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5217 MONO_ADD_INS (cfg->cbb, ins);
5218 /* ins represents the call result */
5221 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5224 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR instruction at the very start of the method to
 * materialize the GOT address into cfg->got_var, plus a dummy use in the
 * exit block so the variable stays live for the whole method.  Does nothing
 * when there is no got_var or it has already been allocated.
 */
5233 mono_emit_load_got_addr (MonoCompile *cfg)
5235 MonoInst *getaddr, *dummy_use;
5237 if (!cfg->got_var || cfg->got_var_allocated)
5240 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5241 getaddr->cil_code = cfg->header->code;
5242 getaddr->dreg = cfg->got_var->dreg;
5244 /* Add it to the start of the first bblock */
5245 if (cfg->bb_entry->code) {
5246 getaddr->next = cfg->bb_entry->code;
5247 cfg->bb_entry->code = getaddr;
5250 MONO_ADD_INS (cfg->bb_entry, getaddr);
5252 cfg->got_var_allocated = TRUE;
5255 * Add a dummy use to keep the got_var alive, since real uses might
5256 * only be generated by the back ends.
5257 * Add it to end_bblock, so the variable's lifetime covers the whole
5259 * It would be better to make the usage of the got var explicit in all
5260 * cases when the backend needs it (i.e. calls, throw etc.), so this
5261 * wouldn't be needed.
5263 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5264 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL size (in bytes) of a method eligible for inlining.  Initialized
 * lazily in mono_method_check_inlining () from the MONO_INLINELIMIT
 * environment variable, defaulting to INLINE_LENGTH_LIMIT. */
5267 static int inline_limit;
5268 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects: explicitly disabled inlining, excessive inline depth,
 * methods without an obtainable header summary, NoInlining/synchronized/
 * MarshalByRef methods, bodies over the size limit (unless
 * AggressiveInlining), classes whose static constructor cannot safely be run
 * or proven already run, soft-float R4 signatures, and methods on the
 * cfg->dont_inline list.
 */
5271 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5273 MonoMethodHeaderSummary header;
5275 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5276 MonoMethodSignature *sig = mono_method_signature (method);
5280 if (cfg->disable_inline)
5285 if (cfg->inline_depth > 10)
5288 if (!mono_method_get_header_summary (method, &header))
5291 /*runtime, icall and pinvoke are checked by summary call*/
5292 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5293 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5294 (mono_class_is_marshalbyref (method->klass)) ||
5298 /* also consider num_locals? */
5299 /* Do the size check early to avoid creating vtables */
5300 if (!inline_limit_inited) {
5301 if (g_getenv ("MONO_INLINELIMIT"))
5302 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5304 inline_limit = INLINE_LENGTH_LIMIT;
5305 inline_limit_inited = TRUE;
5307 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5311 * if we can initialize the class of the method right away, we do,
5312 * otherwise we don't allow inlining if the class needs initialization,
5313 * since it would mean inserting a call to mono_runtime_class_init()
5314 * inside the inlined code
5316 if (!(cfg->opt & MONO_OPT_SHARED)) {
5317 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5318 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5319 vtable = mono_class_vtable (cfg->domain, method->klass);
5322 if (!cfg->compile_aot)
5323 mono_runtime_class_init (vtable);
5324 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5325 if (cfg->run_cctors && method->klass->has_cctor) {
5326 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5327 if (!method->klass->runtime_info)
5328 /* No vtable created yet */
5330 vtable = mono_class_vtable (cfg->domain, method->klass);
5333 /* This makes so that inline cannot trigger */
5334 /* .cctors: too many apps depend on them */
5335 /* running with a specific order... */
5336 if (! vtable->initialized)
5338 mono_runtime_class_init (vtable);
5340 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5341 if (!method->klass->runtime_info)
5342 /* No vtable created yet */
5344 vtable = mono_class_vtable (cfg->domain, method->klass);
5347 if (!vtable->initialized)
/* Shared (AOT/generic-sharing) code cannot run the cctor here. */
5352 * If we're compiling for shared code
5353 * the cctor will need to be run at aot method load time, for example,
5354 * or at the end of the compilation of the inlining method.
5356 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods with R4 in the signature. */
5360 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5361 if (mono_arch_is_soft_float ()) {
5363 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5365 for (i = 0; i < sig->param_count; ++i)
5366 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5371 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a class
 * initialization call for KLASS.  JIT (non-AOT) compilation can consult the
 * vtable's initialized flag directly; otherwise this falls back to the
 * BeforeFieldInit and needs-cctor checks below.
 */
5378 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5380 if (!cfg->compile_aot) {
5382 if (vtable->initialized)
5386 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5387 if (cfg->method == method)
5391 if (!mono_class_needs_cctor_run (klass, method))
5394 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5395 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR (klass is the element class).  When BCHECK is set, a bounds
 * check against max_length is emitted first.  On x86/amd64 a single LEA is
 * used for power-of-two element sizes; gsharedvt element sizes are fetched
 * through the rgctx at runtime.
 */
5402 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5406 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5409 if (mini_is_gsharedvt_variable_klass (klass)) {
5412 mono_class_init (klass);
5413 size = mono_class_array_element_size (klass);
5416 mult_reg = alloc_preg (cfg);
5417 array_reg = arr->dreg;
5418 index_reg = index->dreg;
5420 #if SIZEOF_REGISTER == 8
5421 /* The array reg is 64 bits but the index reg is only 32 */
5422 if (COMPILE_LLVM (cfg)) {
5424 index2_reg = index_reg;
5426 index2_reg = alloc_preg (cfg);
5427 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5430 if (index->type == STACK_I8) {
5431 index2_reg = alloc_preg (cfg);
5432 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5434 index2_reg = index_reg;
5439 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold scale and vector offset into one LEA when the element
 * size is a power of two the addressing mode supports. */
5441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5442 if (size == 1 || size == 2 || size == 4 || size == 8) {
5443 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5445 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5446 ins->klass = mono_class_get_element_class (klass);
5447 ins->type = STACK_MP;
/* Generic path: addr = arr + index * element_size + offsetof(vector). */
5453 add_reg = alloc_ireg_mp (cfg);
5456 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime. */
5459 g_assert (cfg->gshared);
5460 context_used = mini_class_check_context_used (cfg, klass);
5461 g_assert (context_used);
5462 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5463 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5467 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5468 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5469 ins->klass = mono_class_get_element_class (klass);
5470 ins->type = STACK_MP;
5471 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a two-dimensional array:
 * both indexes are rebased against the per-dimension lower_bound and range
 * checked against the per-dimension length from the MonoArrayBounds pair,
 * then addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof(vector).
 */
5477 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5479 int bounds_reg = alloc_preg (cfg);
5480 int add_reg = alloc_ireg_mp (cfg);
5481 int mult_reg = alloc_preg (cfg);
5482 int mult2_reg = alloc_preg (cfg);
5483 int low1_reg = alloc_preg (cfg);
5484 int low2_reg = alloc_preg (cfg);
5485 int high1_reg = alloc_preg (cfg);
5486 int high2_reg = alloc_preg (cfg);
5487 int realidx1_reg = alloc_preg (cfg);
5488 int realidx2_reg = alloc_preg (cfg);
5489 int sum_reg = alloc_preg (cfg);
5490 int index1, index2, tmpreg;
5494 mono_class_init (klass);
5495 size = mono_class_array_element_size (klass);
5497 index1 = index_ins1->dreg;
5498 index2 = index_ins2->dreg;
5500 #if SIZEOF_REGISTER == 8
5501 /* The array reg is 64 bits but the index reg is only 32 */
5502 if (COMPILE_LLVM (cfg)) {
5505 tmpreg = alloc_preg (cfg);
5506 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5508 tmpreg = alloc_preg (cfg);
5509 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5513 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5517 /* range checking */
5518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5519 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, must be < length. */
5521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5522 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5523 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5525 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5526 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5527 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds entry lives at sizeof (MonoArrayBounds) offset. */
5529 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5530 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5531 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5532 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5533 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5534 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5535 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize the two indexes and scale by the element size. */
5537 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5538 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5540 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5541 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5543 ins->type = STACK_MP;
5545 MONO_ADD_INS (cfg->cbb, ins);
5551 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5555 MonoMethod *addr_method;
5557 MonoClass *eclass = cmethod->klass->element_class;
5559 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5562 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5564 /* emit_ldelema_2 depends on OP_LMUL */
5565 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5566 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5569 if (mini_is_gsharedvt_variable_klass (eclass))
5572 element_size = mono_class_array_element_size (eclass);
5573 addr_method = mono_marshal_get_array_address (rank, element_size);
5574 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5579 static MonoBreakPolicy
5580 always_insert_breakpoint (MonoMethod *method)
5582 return MONO_BREAK_POLICY_ALWAYS;
5585 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5588 * mono_set_break_policy:
5589 * policy_callback: the new callback function
5591 * Allow embedders to decide wherther to actually obey breakpoint instructions
5592 * (both break IL instructions and Debugger.Break () method calls), for example
5593 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5594 * untrusted or semi-trusted code.
5596 * @policy_callback will be called every time a break point instruction needs to
5597 * be inserted with the method argument being the method that calls Debugger.Break()
5598 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5599 * if it wants the breakpoint to not be effective in the given method.
5600 * #MONO_BREAK_POLICY_ALWAYS is the default.
5603 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5605 if (policy_callback)
5606 break_policy_func = policy_callback;
5608 break_policy_func = always_insert_breakpoint;
5612 should_insert_brekpoint (MonoMethod *method) {
5613 switch (break_policy_func (method)) {
5614 case MONO_BREAK_POLICY_ALWAYS:
5616 case MONO_BREAK_POLICY_NEVER:
5618 case MONO_BREAK_POLICY_ON_DBG:
5619 g_warning ("mdb no longer supported");
5622 g_warning ("Incorrect value returned from break policy callback");
5627 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5629 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5631 MonoInst *addr, *store, *load;
5632 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5634 /* the bounds check is already done by the callers */
5635 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5637 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5638 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5639 if (mini_type_is_reference (fsig->params [2]))
5640 emit_write_barrier (cfg, addr, load);
5642 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5643 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5650 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5652 return mini_type_is_reference (&klass->byval_arg);
5656 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5658 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5659 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5660 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5661 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5662 MonoInst *iargs [3];
5665 mono_class_setup_vtable (obj_array);
5666 g_assert (helper->slot);
5668 if (sp [0]->type != STACK_OBJ)
5670 if (sp [2]->type != STACK_OBJ)
5677 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5681 if (mini_is_gsharedvt_variable_klass (klass)) {
5684 // FIXME-VT: OP_ICONST optimization
5685 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5686 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5687 ins->opcode = OP_STOREV_MEMBASE;
5688 } else if (sp [1]->opcode == OP_ICONST) {
5689 int array_reg = sp [0]->dreg;
5690 int index_reg = sp [1]->dreg;
5691 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5693 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5694 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5697 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5698 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5700 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5701 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5702 if (generic_class_is_reference_type (cfg, klass))
5703 emit_write_barrier (cfg, addr, sp [2]);
5710 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5715 eklass = mono_class_from_mono_type (fsig->params [2]);
5717 eklass = mono_class_from_mono_type (fsig->ret);
5720 return emit_array_store (cfg, eklass, args, FALSE);
5722 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5729 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5732 int param_size, return_size;
5734 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5735 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5737 if (cfg->verbose_level > 3)
5738 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5740 //Don't allow mixing reference types with value types
5741 if (param_klass->valuetype != return_klass->valuetype) {
5742 if (cfg->verbose_level > 3)
5743 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5747 if (!param_klass->valuetype) {
5748 if (cfg->verbose_level > 3)
5749 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5754 if (param_klass->has_references || return_klass->has_references)
5757 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5758 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5759 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5760 if (cfg->verbose_level > 3)
5761 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5765 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5766 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5767 if (cfg->verbose_level > 3)
5768 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5772 param_size = mono_class_value_size (param_klass, &align);
5773 return_size = mono_class_value_size (return_klass, &align);
5775 //We can do it if sizes match
5776 if (param_size == return_size) {
5777 if (cfg->verbose_level > 3)
5778 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5782 //No simple way to handle struct if sizes don't match
5783 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5784 if (cfg->verbose_level > 3)
5785 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5790 * Same reg size category.
5791 * A quick note on why we don't require widening here.
5792 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5794 * Since the source value comes from a function argument, the JIT will already have
5795 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5797 if (param_size <= 4 && return_size <= 4) {
5798 if (cfg->verbose_level > 3)
5799 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5807 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5809 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5810 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5812 if (mini_is_gsharedvt_variable_type (fsig->ret))
5815 //Valuetypes that are semantically equivalent or numbers than can be widened to
5816 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5819 //Arrays of valuetypes that are semantically equivalent
5820 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5827 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5829 #ifdef MONO_ARCH_SIMD_INTRINSICS
5830 MonoInst *ins = NULL;
5832 if (cfg->opt & MONO_OPT_SIMD) {
5833 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5839 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5843 emit_memory_barrier (MonoCompile *cfg, int kind)
5845 MonoInst *ins = NULL;
5846 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5847 MONO_ADD_INS (cfg->cbb, ins);
5848 ins->backend.memory_barrier_kind = kind;
5854 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5856 MonoInst *ins = NULL;
5859 /* The LLVM backend supports these intrinsics */
5860 if (cmethod->klass == mono_defaults.math_class) {
5861 if (strcmp (cmethod->name, "Sin") == 0) {
5863 } else if (strcmp (cmethod->name, "Cos") == 0) {
5865 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5867 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5871 if (opcode && fsig->param_count == 1) {
5872 MONO_INST_NEW (cfg, ins, opcode);
5873 ins->type = STACK_R8;
5874 ins->dreg = mono_alloc_freg (cfg);
5875 ins->sreg1 = args [0]->dreg;
5876 MONO_ADD_INS (cfg->cbb, ins);
5880 if (cfg->opt & MONO_OPT_CMOV) {
5881 if (strcmp (cmethod->name, "Min") == 0) {
5882 if (fsig->params [0]->type == MONO_TYPE_I4)
5884 if (fsig->params [0]->type == MONO_TYPE_U4)
5885 opcode = OP_IMIN_UN;
5886 else if (fsig->params [0]->type == MONO_TYPE_I8)
5888 else if (fsig->params [0]->type == MONO_TYPE_U8)
5889 opcode = OP_LMIN_UN;
5890 } else if (strcmp (cmethod->name, "Max") == 0) {
5891 if (fsig->params [0]->type == MONO_TYPE_I4)
5893 if (fsig->params [0]->type == MONO_TYPE_U4)
5894 opcode = OP_IMAX_UN;
5895 else if (fsig->params [0]->type == MONO_TYPE_I8)
5897 else if (fsig->params [0]->type == MONO_TYPE_U8)
5898 opcode = OP_LMAX_UN;
5902 if (opcode && fsig->param_count == 2) {
5903 MONO_INST_NEW (cfg, ins, opcode);
5904 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5905 ins->dreg = mono_alloc_ireg (cfg);
5906 ins->sreg1 = args [0]->dreg;
5907 ins->sreg2 = args [1]->dreg;
5908 MONO_ADD_INS (cfg->cbb, ins);
5916 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5918 if (cmethod->klass == mono_defaults.array_class) {
5919 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5920 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5921 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5922 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5923 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5924 return emit_array_unsafe_mov (cfg, fsig, args);
5931 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5933 MonoInst *ins = NULL;
5935 static MonoClass *runtime_helpers_class = NULL;
5936 if (! runtime_helpers_class)
5937 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5938 "System.Runtime.CompilerServices", "RuntimeHelpers");
5940 if (cmethod->klass == mono_defaults.string_class) {
5941 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5942 int dreg = alloc_ireg (cfg);
5943 int index_reg = alloc_preg (cfg);
5944 int add_reg = alloc_preg (cfg);
5946 #if SIZEOF_REGISTER == 8
5947 if (COMPILE_LLVM (cfg)) {
5948 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5950 /* The array reg is 64 bits but the index reg is only 32 */
5951 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5954 index_reg = args [1]->dreg;
5956 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5958 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5959 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5960 add_reg = ins->dreg;
5961 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5964 int mult_reg = alloc_preg (cfg);
5965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5966 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5967 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5968 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5970 type_from_op (cfg, ins, NULL, NULL);
5972 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5973 int dreg = alloc_ireg (cfg);
5974 /* Decompose later to allow more optimizations */
5975 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5976 ins->type = STACK_I4;
5977 ins->flags |= MONO_INST_FAULT;
5978 cfg->cbb->has_array_access = TRUE;
5979 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5984 } else if (cmethod->klass == mono_defaults.object_class) {
5985 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5986 int dreg = alloc_ireg_ref (cfg);
5987 int vt_reg = alloc_preg (cfg);
5988 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5990 type_from_op (cfg, ins, NULL, NULL);
5993 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5994 int dreg = alloc_ireg (cfg);
5995 int t1 = alloc_ireg (cfg);
5997 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5998 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5999 ins->type = STACK_I4;
6002 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6003 MONO_INST_NEW (cfg, ins, OP_NOP);
6004 MONO_ADD_INS (cfg->cbb, ins);
6008 } else if (cmethod->klass == mono_defaults.array_class) {
6009 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6010 return emit_array_generic_access (cfg, fsig, args, FALSE);
6011 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6012 return emit_array_generic_access (cfg, fsig, args, TRUE);
6014 #ifndef MONO_BIG_ARRAYS
6016 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6019 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6020 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6021 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6022 int dreg = alloc_ireg (cfg);
6023 int bounds_reg = alloc_ireg_mp (cfg);
6024 MonoBasicBlock *end_bb, *szarray_bb;
6025 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6027 NEW_BBLOCK (cfg, end_bb);
6028 NEW_BBLOCK (cfg, szarray_bb);
6030 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6031 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6034 /* Non-szarray case */
6036 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6037 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6039 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6040 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6041 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6042 MONO_START_BB (cfg, szarray_bb);
6045 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6046 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6048 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6049 MONO_START_BB (cfg, end_bb);
6051 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6052 ins->type = STACK_I4;
6058 if (cmethod->name [0] != 'g')
6061 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6062 int dreg = alloc_ireg (cfg);
6063 int vtable_reg = alloc_preg (cfg);
6064 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6065 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6066 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6067 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6068 type_from_op (cfg, ins, NULL, NULL);
6071 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6072 int dreg = alloc_ireg (cfg);
6074 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6075 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6076 type_from_op (cfg, ins, NULL, NULL);
6081 } else if (cmethod->klass == runtime_helpers_class) {
6082 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6083 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6087 } else if (cmethod->klass == mono_defaults.monitor_class) {
6088 gboolean is_enter = FALSE;
6089 gboolean is_v4 = FALSE;
6091 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6095 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6100 * To make async stack traces work, icalls which can block should have a wrapper.
6101 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6103 MonoBasicBlock *end_bb;
6105 NEW_BBLOCK (cfg, end_bb);
6107 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6110 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6111 MONO_START_BB (cfg, end_bb);
6114 } else if (cmethod->klass == mono_defaults.thread_class) {
6115 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6116 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6117 MONO_ADD_INS (cfg->cbb, ins);
6119 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6120 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6121 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6123 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6125 if (fsig->params [0]->type == MONO_TYPE_I1)
6126 opcode = OP_LOADI1_MEMBASE;
6127 else if (fsig->params [0]->type == MONO_TYPE_U1)
6128 opcode = OP_LOADU1_MEMBASE;
6129 else if (fsig->params [0]->type == MONO_TYPE_I2)
6130 opcode = OP_LOADI2_MEMBASE;
6131 else if (fsig->params [0]->type == MONO_TYPE_U2)
6132 opcode = OP_LOADU2_MEMBASE;
6133 else if (fsig->params [0]->type == MONO_TYPE_I4)
6134 opcode = OP_LOADI4_MEMBASE;
6135 else if (fsig->params [0]->type == MONO_TYPE_U4)
6136 opcode = OP_LOADU4_MEMBASE;
6137 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6138 opcode = OP_LOADI8_MEMBASE;
6139 else if (fsig->params [0]->type == MONO_TYPE_R4)
6140 opcode = OP_LOADR4_MEMBASE;
6141 else if (fsig->params [0]->type == MONO_TYPE_R8)
6142 opcode = OP_LOADR8_MEMBASE;
6143 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6144 opcode = OP_LOAD_MEMBASE;
6147 MONO_INST_NEW (cfg, ins, opcode);
6148 ins->inst_basereg = args [0]->dreg;
6149 ins->inst_offset = 0;
6150 MONO_ADD_INS (cfg->cbb, ins);
6152 switch (fsig->params [0]->type) {
6159 ins->dreg = mono_alloc_ireg (cfg);
6160 ins->type = STACK_I4;
6164 ins->dreg = mono_alloc_lreg (cfg);
6165 ins->type = STACK_I8;
6169 ins->dreg = mono_alloc_ireg (cfg);
6170 #if SIZEOF_REGISTER == 8
6171 ins->type = STACK_I8;
6173 ins->type = STACK_I4;
6178 ins->dreg = mono_alloc_freg (cfg);
6179 ins->type = STACK_R8;
6182 g_assert (mini_type_is_reference (fsig->params [0]));
6183 ins->dreg = mono_alloc_ireg_ref (cfg);
6184 ins->type = STACK_OBJ;
6188 if (opcode == OP_LOADI8_MEMBASE)
6189 ins = mono_decompose_opcode (cfg, ins);
6191 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6195 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6197 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6199 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6200 opcode = OP_STOREI1_MEMBASE_REG;
6201 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6202 opcode = OP_STOREI2_MEMBASE_REG;
6203 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6204 opcode = OP_STOREI4_MEMBASE_REG;
6205 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6206 opcode = OP_STOREI8_MEMBASE_REG;
6207 else if (fsig->params [0]->type == MONO_TYPE_R4)
6208 opcode = OP_STORER4_MEMBASE_REG;
6209 else if (fsig->params [0]->type == MONO_TYPE_R8)
6210 opcode = OP_STORER8_MEMBASE_REG;
6211 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6212 opcode = OP_STORE_MEMBASE_REG;
6215 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6217 MONO_INST_NEW (cfg, ins, opcode);
6218 ins->sreg1 = args [1]->dreg;
6219 ins->inst_destbasereg = args [0]->dreg;
6220 ins->inst_offset = 0;
6221 MONO_ADD_INS (cfg->cbb, ins);
6223 if (opcode == OP_STOREI8_MEMBASE_REG)
6224 ins = mono_decompose_opcode (cfg, ins);
6229 } else if (cmethod->klass->image == mono_defaults.corlib &&
6230 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6231 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6234 #if SIZEOF_REGISTER == 8
6235 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6236 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6237 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6238 ins->dreg = mono_alloc_preg (cfg);
6239 ins->sreg1 = args [0]->dreg;
6240 ins->type = STACK_I8;
6241 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6242 MONO_ADD_INS (cfg->cbb, ins);
6246 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6248 /* 64 bit reads are already atomic */
6249 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6250 load_ins->dreg = mono_alloc_preg (cfg);
6251 load_ins->inst_basereg = args [0]->dreg;
6252 load_ins->inst_offset = 0;
6253 load_ins->type = STACK_I8;
6254 MONO_ADD_INS (cfg->cbb, load_ins);
6256 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6263 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6264 MonoInst *ins_iconst;
6267 if (fsig->params [0]->type == MONO_TYPE_I4) {
6268 opcode = OP_ATOMIC_ADD_I4;
6269 cfg->has_atomic_add_i4 = TRUE;
6271 #if SIZEOF_REGISTER == 8
6272 else if (fsig->params [0]->type == MONO_TYPE_I8)
6273 opcode = OP_ATOMIC_ADD_I8;
6276 if (!mono_arch_opcode_supported (opcode))
6278 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6279 ins_iconst->inst_c0 = 1;
6280 ins_iconst->dreg = mono_alloc_ireg (cfg);
6281 MONO_ADD_INS (cfg->cbb, ins_iconst);
6283 MONO_INST_NEW (cfg, ins, opcode);
6284 ins->dreg = mono_alloc_ireg (cfg);
6285 ins->inst_basereg = args [0]->dreg;
6286 ins->inst_offset = 0;
6287 ins->sreg2 = ins_iconst->dreg;
6288 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6289 MONO_ADD_INS (cfg->cbb, ins);
6291 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6292 MonoInst *ins_iconst;
6295 if (fsig->params [0]->type == MONO_TYPE_I4) {
6296 opcode = OP_ATOMIC_ADD_I4;
6297 cfg->has_atomic_add_i4 = TRUE;
6299 #if SIZEOF_REGISTER == 8
6300 else if (fsig->params [0]->type == MONO_TYPE_I8)
6301 opcode = OP_ATOMIC_ADD_I8;
6304 if (!mono_arch_opcode_supported (opcode))
6306 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6307 ins_iconst->inst_c0 = -1;
6308 ins_iconst->dreg = mono_alloc_ireg (cfg);
6309 MONO_ADD_INS (cfg->cbb, ins_iconst);
6311 MONO_INST_NEW (cfg, ins, opcode);
6312 ins->dreg = mono_alloc_ireg (cfg);
6313 ins->inst_basereg = args [0]->dreg;
6314 ins->inst_offset = 0;
6315 ins->sreg2 = ins_iconst->dreg;
6316 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6317 MONO_ADD_INS (cfg->cbb, ins);
6319 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6322 if (fsig->params [0]->type == MONO_TYPE_I4) {
6323 opcode = OP_ATOMIC_ADD_I4;
6324 cfg->has_atomic_add_i4 = TRUE;
6326 #if SIZEOF_REGISTER == 8
6327 else if (fsig->params [0]->type == MONO_TYPE_I8)
6328 opcode = OP_ATOMIC_ADD_I8;
6331 if (!mono_arch_opcode_supported (opcode))
6333 MONO_INST_NEW (cfg, ins, opcode);
6334 ins->dreg = mono_alloc_ireg (cfg);
6335 ins->inst_basereg = args [0]->dreg;
6336 ins->inst_offset = 0;
6337 ins->sreg2 = args [1]->dreg;
6338 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6339 MONO_ADD_INS (cfg->cbb, ins);
6342 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6343 MonoInst *f2i = NULL, *i2f;
6344 guint32 opcode, f2i_opcode, i2f_opcode;
6345 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6346 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6348 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6349 fsig->params [0]->type == MONO_TYPE_R4) {
6350 opcode = OP_ATOMIC_EXCHANGE_I4;
6351 f2i_opcode = OP_MOVE_F_TO_I4;
6352 i2f_opcode = OP_MOVE_I4_TO_F;
6353 cfg->has_atomic_exchange_i4 = TRUE;
6355 #if SIZEOF_REGISTER == 8
6357 fsig->params [0]->type == MONO_TYPE_I8 ||
6358 fsig->params [0]->type == MONO_TYPE_R8 ||
6359 fsig->params [0]->type == MONO_TYPE_I) {
6360 opcode = OP_ATOMIC_EXCHANGE_I8;
6361 f2i_opcode = OP_MOVE_F_TO_I8;
6362 i2f_opcode = OP_MOVE_I8_TO_F;
6365 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6366 opcode = OP_ATOMIC_EXCHANGE_I4;
6367 cfg->has_atomic_exchange_i4 = TRUE;
6373 if (!mono_arch_opcode_supported (opcode))
6377 /* TODO: Decompose these opcodes instead of bailing here. */
6378 if (COMPILE_SOFT_FLOAT (cfg))
6381 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6382 f2i->dreg = mono_alloc_ireg (cfg);
6383 f2i->sreg1 = args [1]->dreg;
6384 if (f2i_opcode == OP_MOVE_F_TO_I4)
6385 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6386 MONO_ADD_INS (cfg->cbb, f2i);
6389 MONO_INST_NEW (cfg, ins, opcode);
6390 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6391 ins->inst_basereg = args [0]->dreg;
6392 ins->inst_offset = 0;
6393 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6394 MONO_ADD_INS (cfg->cbb, ins);
6396 switch (fsig->params [0]->type) {
6398 ins->type = STACK_I4;
6401 ins->type = STACK_I8;
6404 #if SIZEOF_REGISTER == 8
6405 ins->type = STACK_I8;
6407 ins->type = STACK_I4;
6412 ins->type = STACK_R8;
6415 g_assert (mini_type_is_reference (fsig->params [0]));
6416 ins->type = STACK_OBJ;
6421 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6422 i2f->dreg = mono_alloc_freg (cfg);
6423 i2f->sreg1 = ins->dreg;
6424 i2f->type = STACK_R8;
6425 if (i2f_opcode == OP_MOVE_I4_TO_F)
6426 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6427 MONO_ADD_INS (cfg->cbb, i2f);
6432 if (cfg->gen_write_barriers && is_ref)
6433 emit_write_barrier (cfg, args [0], args [1]);
6435 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6436 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6437 guint32 opcode, f2i_opcode, i2f_opcode;
6438 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6439 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6441 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6442 fsig->params [1]->type == MONO_TYPE_R4) {
6443 opcode = OP_ATOMIC_CAS_I4;
6444 f2i_opcode = OP_MOVE_F_TO_I4;
6445 i2f_opcode = OP_MOVE_I4_TO_F;
6446 cfg->has_atomic_cas_i4 = TRUE;
6448 #if SIZEOF_REGISTER == 8
6450 fsig->params [1]->type == MONO_TYPE_I8 ||
6451 fsig->params [1]->type == MONO_TYPE_R8 ||
6452 fsig->params [1]->type == MONO_TYPE_I) {
6453 opcode = OP_ATOMIC_CAS_I8;
6454 f2i_opcode = OP_MOVE_F_TO_I8;
6455 i2f_opcode = OP_MOVE_I8_TO_F;
6458 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6459 opcode = OP_ATOMIC_CAS_I4;
6460 cfg->has_atomic_cas_i4 = TRUE;
6466 if (!mono_arch_opcode_supported (opcode))
6470 /* TODO: Decompose these opcodes instead of bailing here. */
6471 if (COMPILE_SOFT_FLOAT (cfg))
6474 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6475 f2i_new->dreg = mono_alloc_ireg (cfg);
6476 f2i_new->sreg1 = args [1]->dreg;
6477 if (f2i_opcode == OP_MOVE_F_TO_I4)
6478 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6479 MONO_ADD_INS (cfg->cbb, f2i_new);
6481 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6482 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6483 f2i_cmp->sreg1 = args [2]->dreg;
6484 if (f2i_opcode == OP_MOVE_F_TO_I4)
6485 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6486 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6489 MONO_INST_NEW (cfg, ins, opcode);
6490 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6491 ins->sreg1 = args [0]->dreg;
6492 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6493 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6494 MONO_ADD_INS (cfg->cbb, ins);
6496 switch (fsig->params [1]->type) {
6498 ins->type = STACK_I4;
6501 ins->type = STACK_I8;
6504 #if SIZEOF_REGISTER == 8
6505 ins->type = STACK_I8;
6507 ins->type = STACK_I4;
6511 ins->type = cfg->r4_stack_type;
6514 ins->type = STACK_R8;
6517 g_assert (mini_type_is_reference (fsig->params [1]));
6518 ins->type = STACK_OBJ;
6523 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6524 i2f->dreg = mono_alloc_freg (cfg);
6525 i2f->sreg1 = ins->dreg;
6526 i2f->type = STACK_R8;
6527 if (i2f_opcode == OP_MOVE_I4_TO_F)
6528 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6529 MONO_ADD_INS (cfg->cbb, i2f);
6534 if (cfg->gen_write_barriers && is_ref)
6535 emit_write_barrier (cfg, args [0], args [1]);
6537 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6538 fsig->params [1]->type == MONO_TYPE_I4) {
6539 MonoInst *cmp, *ceq;
6541 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6544 /* int32 r = CAS (location, value, comparand); */
6545 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6546 ins->dreg = alloc_ireg (cfg);
6547 ins->sreg1 = args [0]->dreg;
6548 ins->sreg2 = args [1]->dreg;
6549 ins->sreg3 = args [2]->dreg;
6550 ins->type = STACK_I4;
6551 MONO_ADD_INS (cfg->cbb, ins);
6553 /* bool result = r == comparand; */
6554 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6555 cmp->sreg1 = ins->dreg;
6556 cmp->sreg2 = args [2]->dreg;
6557 cmp->type = STACK_I4;
6558 MONO_ADD_INS (cfg->cbb, cmp);
6560 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6561 ceq->dreg = alloc_ireg (cfg);
6562 ceq->type = STACK_I4;
6563 MONO_ADD_INS (cfg->cbb, ceq);
6565 /* *success = result; */
6566 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6568 cfg->has_atomic_cas_i4 = TRUE;
6570 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6571 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6575 } else if (cmethod->klass->image == mono_defaults.corlib &&
6576 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6577 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6580 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6582 MonoType *t = fsig->params [0];
6584 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6586 g_assert (t->byref);
6587 /* t is a byref type, so the reference check is more complicated */
6588 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6589 if (t->type == MONO_TYPE_I1)
6590 opcode = OP_ATOMIC_LOAD_I1;
6591 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6592 opcode = OP_ATOMIC_LOAD_U1;
6593 else if (t->type == MONO_TYPE_I2)
6594 opcode = OP_ATOMIC_LOAD_I2;
6595 else if (t->type == MONO_TYPE_U2)
6596 opcode = OP_ATOMIC_LOAD_U2;
6597 else if (t->type == MONO_TYPE_I4)
6598 opcode = OP_ATOMIC_LOAD_I4;
6599 else if (t->type == MONO_TYPE_U4)
6600 opcode = OP_ATOMIC_LOAD_U4;
6601 else if (t->type == MONO_TYPE_R4)
6602 opcode = OP_ATOMIC_LOAD_R4;
6603 else if (t->type == MONO_TYPE_R8)
6604 opcode = OP_ATOMIC_LOAD_R8;
6605 #if SIZEOF_REGISTER == 8
6606 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6607 opcode = OP_ATOMIC_LOAD_I8;
6608 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6609 opcode = OP_ATOMIC_LOAD_U8;
6611 else if (t->type == MONO_TYPE_I)
6612 opcode = OP_ATOMIC_LOAD_I4;
6613 else if (is_ref || t->type == MONO_TYPE_U)
6614 opcode = OP_ATOMIC_LOAD_U4;
6618 if (!mono_arch_opcode_supported (opcode))
6621 MONO_INST_NEW (cfg, ins, opcode);
6622 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6623 ins->sreg1 = args [0]->dreg;
6624 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6625 MONO_ADD_INS (cfg->cbb, ins);
6628 case MONO_TYPE_BOOLEAN:
6635 ins->type = STACK_I4;
6639 ins->type = STACK_I8;
6643 #if SIZEOF_REGISTER == 8
6644 ins->type = STACK_I8;
6646 ins->type = STACK_I4;
6650 ins->type = cfg->r4_stack_type;
6653 ins->type = STACK_R8;
6657 ins->type = STACK_OBJ;
6663 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6665 MonoType *t = fsig->params [0];
6668 g_assert (t->byref);
6669 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6670 if (t->type == MONO_TYPE_I1)
6671 opcode = OP_ATOMIC_STORE_I1;
6672 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6673 opcode = OP_ATOMIC_STORE_U1;
6674 else if (t->type == MONO_TYPE_I2)
6675 opcode = OP_ATOMIC_STORE_I2;
6676 else if (t->type == MONO_TYPE_U2)
6677 opcode = OP_ATOMIC_STORE_U2;
6678 else if (t->type == MONO_TYPE_I4)
6679 opcode = OP_ATOMIC_STORE_I4;
6680 else if (t->type == MONO_TYPE_U4)
6681 opcode = OP_ATOMIC_STORE_U4;
6682 else if (t->type == MONO_TYPE_R4)
6683 opcode = OP_ATOMIC_STORE_R4;
6684 else if (t->type == MONO_TYPE_R8)
6685 opcode = OP_ATOMIC_STORE_R8;
6686 #if SIZEOF_REGISTER == 8
6687 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6688 opcode = OP_ATOMIC_STORE_I8;
6689 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6690 opcode = OP_ATOMIC_STORE_U8;
6692 else if (t->type == MONO_TYPE_I)
6693 opcode = OP_ATOMIC_STORE_I4;
6694 else if (is_ref || t->type == MONO_TYPE_U)
6695 opcode = OP_ATOMIC_STORE_U4;
6699 if (!mono_arch_opcode_supported (opcode))
6702 MONO_INST_NEW (cfg, ins, opcode);
6703 ins->dreg = args [0]->dreg;
6704 ins->sreg1 = args [1]->dreg;
6705 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6706 MONO_ADD_INS (cfg->cbb, ins);
6708 if (cfg->gen_write_barriers && is_ref)
6709 emit_write_barrier (cfg, args [0], args [1]);
6715 } else if (cmethod->klass->image == mono_defaults.corlib &&
6716 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6717 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6718 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6719 if (should_insert_brekpoint (cfg->method)) {
6720 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6722 MONO_INST_NEW (cfg, ins, OP_NOP);
6723 MONO_ADD_INS (cfg->cbb, ins);
6727 } else if (cmethod->klass->image == mono_defaults.corlib &&
6728 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6729 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6730 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6732 EMIT_NEW_ICONST (cfg, ins, 1);
6734 EMIT_NEW_ICONST (cfg, ins, 0);
6737 } else if (cmethod->klass->image == mono_defaults.corlib &&
6738 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6739 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6740 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6741 /* No stack walks are currently available, so implement this as an intrinsic */
6742 MonoInst *assembly_ins;
6744 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6745 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6748 } else if (cmethod->klass->image == mono_defaults.corlib &&
6749 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6750 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6751 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6752 /* No stack walks are currently available, so implement this as an intrinsic */
6753 MonoInst *method_ins;
6754 MonoMethod *declaring = cfg->method;
6756 /* This returns the declaring generic method */
6757 if (declaring->is_inflated)
6758 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6759 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6760 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6761 cfg->no_inline = TRUE;
6762 if (cfg->method != cfg->current_method)
6763 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6766 } else if (cmethod->klass == mono_defaults.math_class) {
6768 * There is general branchless code for Min/Max, but it does not work for
6770 * http://everything2.com/?node_id=1051618
6772 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6773 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6774 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6775 !strcmp (cmethod->klass->name, "Selector")) ||
6776 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6777 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6778 !strcmp (cmethod->klass->name, "Selector"))
6780 if (cfg->backend->have_objc_get_selector &&
6781 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6782 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6783 cfg->compile_aot && !cfg->llvm_only) {
6785 MonoJumpInfoToken *ji;
6790 cfg->exception_message = g_strdup ("GetHandle");
6791 cfg->disable_llvm = TRUE;
6793 if (args [0]->opcode == OP_GOT_ENTRY) {
6794 pi = (MonoInst *)args [0]->inst_p1;
6795 g_assert (pi->opcode == OP_PATCH_INFO);
6796 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6797 ji = (MonoJumpInfoToken *)pi->inst_p0;
6799 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6800 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6803 NULLIFY_INS (args [0]);
6806 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6807 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6808 ins->dreg = mono_alloc_ireg (cfg);
6810 ins->inst_p0 = mono_string_to_utf8 (s);
6811 MONO_ADD_INS (cfg->cbb, ins);
6816 #ifdef MONO_ARCH_SIMD_INTRINSICS
6817 if (cfg->opt & MONO_OPT_SIMD) {
6818 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6824 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6828 if (COMPILE_LLVM (cfg)) {
6829 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6834 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6838 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a more efficient equivalent, returning the
 * replacement call instruction, or NULL if no redirection applies.  The only
 * case visible here is String.InternalAllocateStr, which is rewritten into a
 * direct call to the managed GC string allocator when allocation profiling
 * and MONO_OPT_SHARED are both off.
 */
6841 inline static MonoInst*
6842 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6843 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6845 if (method->klass == mono_defaults.string_class) {
6846 /* managed string allocation support */
6847 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6848 MonoInst *iargs [2];
6849 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6850 MonoMethod *managed_alloc = NULL;
6852 g_assert (vtable); /* Should not fail since it's System.String */
6853 #ifndef MONO_CROSS_COMPILE
/* The managed allocator is not available when cross-compiling. */
6854 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
6858 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6859 iargs [1] = args [0];
6860 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this'), store the corresponding stack value SP [i]
 * into it, and record the vars in cfg->args so the inlined body can load them.
 */
6867 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6869 MonoInst *store, *temp;
6872 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The 'this' slot has no entry in sig->params, so derive its type from the stack. */
6873 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6876 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6877 * would be different than the MonoInst's used to represent arguments, and
6878 * the ldelema implementation can't deal with that.
6879 * Solution: When ldelema is used on an inline argument, create a var for
6880 * it, emit ldelema on that var, and emit the saving code below in
6881 * inline_method () if needed.
6883 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6884 cfg->args [i] = temp;
6885 /* This uses cfg->args [i] which is set by the preceding line */
6886 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6887 store->cil_code = sp [0]->cil_code;
6892 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6893 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6895 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid (enabled by MONO_INLINE_CALLED_LIMITED_METHODS): only allow
 * inlining of callees whose full name starts with the prefix given in the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The env var is
 * read once and cached in a function-local static.
 */
6897 check_inline_called_method_name_limit (MonoMethod *called_method)
6900 static const char *limit = NULL;
6902 if (limit == NULL) {
6903 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6905 if (limit_string != NULL)
6906 limit = limit_string;
6911 if (limit [0] != '\0') {
6912 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
6914 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6915 g_free (called_method_name);
6917 //return (strncmp_result <= 0);
6918 return (strncmp_result == 0);
6925 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid (enabled by MONO_INLINE_CALLER_LIMITED_METHODS): only allow
 * inlining when the caller's full name starts with the prefix given in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  Mirrors
 * check_inline_called_method_name_limit () above.
 */
6927 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6930 static const char *limit = NULL;
6932 if (limit == NULL) {
6933 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6934 if (limit_string != NULL) {
6935 limit = limit_string;
6941 if (limit [0] != '\0') {
6942 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
6944 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6945 g_free (caller_method_name);
6947 //return (strncmp_result <= 0);
6948 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR to initialize the register DREG of type RTYPE to its zero value:
 * NULL for pointers/references, 0 for integers, 0.0 for floats (loaded from
 * static constants so the R4/R8 const instructions have an address to point at),
 * and VZERO for value types.
 */
6956 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6958 static double r8_0 = 0.0;
6959 static float r4_0 = 0.0;
6963 rtype = mini_get_underlying_type (rtype);
6967 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6968 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6969 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6970 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6971 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6972 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision instead of widening to R8. */
6973 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6974 ins->type = STACK_R4;
6975 ins->inst_p0 = (void*)&r4_0;
6977 MONO_ADD_INS (cfg->cbb, ins);
6978 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6979 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6980 ins->type = STACK_R8;
6981 ins->inst_p0 = (void*)&r8_0;
6983 MONO_ADD_INS (cfg->cbb, ins);
6984 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6985 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6986 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6987 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type parameters known to be value types also get VZERO. */
6988 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6990 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder initializations
 * which keep the IR valid (every vreg has a def) without generating real
 * code.  Types not handled by a dummy opcode fall back to a real init via
 * emit_init_rvar ().
 */
6995 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6999 rtype = mini_get_underlying_type (rtype);
7003 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7004 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7005 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7006 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7007 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7008 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7009 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7010 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7011 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7012 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7013 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7014 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7015 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7016 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
7018 emit_init_rvar (cfg, dreg, rtype);
7022 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize the IL local LOCAL of type TYPE.  Under soft-float the value is
 * built in a fresh vreg and then stored to the local; otherwise the local's
 * dreg is initialized directly (really or with a dummy, depending on INIT).
 */
7024 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7026 MonoInst *var = cfg->locals [local];
7027 if (COMPILE_SOFT_FLOAT (cfg)) {
7029 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7030 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cfg->cbb->last_ins) into the local. */
7031 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7034 emit_init_rvar (cfg, var->dreg, type);
7036 emit_dummy_init_rvar (cfg, var->dreg, type);
7043 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point, consuming its arguments from SP.
 * Returns the inlining cost computed by mono_method_to_ir () when the inline is
 * accepted; on abort the newly created bblocks are discarded and the caller's
 * state is restored.  IP is the call site (used to detect callvirt),
 * INLINE_ALWAYS forces the inline regardless of cost.
 */
7046 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7047 guchar *ip, guint real_offset, gboolean inline_always)
7049 MonoInst *ins, *rvar = NULL;
7050 MonoMethodHeader *cheader;
7051 MonoBasicBlock *ebblock, *sbblock;
7053 MonoMethod *prev_inlined_method;
/* prev_* hold the caller's per-method compile state, restored after the inline. */
7054 MonoInst **prev_locals, **prev_args;
7055 MonoType **prev_arg_types;
7056 guint prev_real_offset;
7057 GHashTable *prev_cbb_hash;
7058 MonoBasicBlock **prev_cil_offset_to_bb;
7059 MonoBasicBlock *prev_cbb;
7060 unsigned char* prev_cil_start;
7061 guint32 prev_cil_offset_to_bb_len;
7062 MonoMethod *prev_current_method;
7063 MonoGenericContext *prev_generic_context;
7064 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7066 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
7068 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7069 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7072 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7073 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7078 fsig = mono_method_signature (cmethod);
7080 if (cfg->verbose_level > 2)
7081 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7083 if (!cmethod->inline_info) {
7084 cfg->stat_inlineable_methods++;
7085 cmethod->inline_info = 1;
7088 /* allocate local variables */
7089 cheader = mono_method_get_header (cmethod);
7091 if (cheader == NULL || mono_loader_get_last_error ()) {
7093 mono_metadata_free_mh (cheader);
7094 if (inline_always && mono_loader_get_last_error ()) {
/* A forced inline that fails to load is a hard compile error. */
7095 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7096 mono_error_set_from_loader_error (&cfg->error);
7099 mono_loader_clear_error ();
7103 /*Must verify before creating locals as it can cause the JIT to assert.*/
7104 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7105 mono_metadata_free_mh (cheader);
7109 /* allocate space to store the return value */
7110 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7111 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7114 prev_locals = cfg->locals;
7115 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7116 for (i = 0; i < cheader->num_locals; ++i)
7117 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7119 /* allocate start and end blocks */
7120 /* This is needed so if the inline is aborted, we can clean up */
7121 NEW_BBLOCK (cfg, sbblock);
7122 sbblock->real_offset = real_offset;
7124 NEW_BBLOCK (cfg, ebblock);
7125 ebblock->block_num = cfg->num_bblocks++;
7126 ebblock->real_offset = real_offset;
/* Save the caller's state before switching cfg over to the inlined method. */
7128 prev_args = cfg->args;
7129 prev_arg_types = cfg->arg_types;
7130 prev_inlined_method = cfg->inlined_method;
7131 cfg->inlined_method = cmethod;
7132 cfg->ret_var_set = FALSE;
7133 cfg->inline_depth ++;
7134 prev_real_offset = cfg->real_offset;
7135 prev_cbb_hash = cfg->cbb_hash;
7136 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7137 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7138 prev_cil_start = cfg->cil_start;
7139 prev_cbb = cfg->cbb;
7140 prev_current_method = cfg->current_method;
7141 prev_generic_context = cfg->generic_context;
7142 prev_ret_var_set = cfg->ret_var_set;
7143 prev_disable_inline = cfg->disable_inline;
7145 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; returns the cost, negative on failure. */
7148 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7150 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of the outcome. */
7152 cfg->inlined_method = prev_inlined_method;
7153 cfg->real_offset = prev_real_offset;
7154 cfg->cbb_hash = prev_cbb_hash;
7155 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7156 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7157 cfg->cil_start = prev_cil_start;
7158 cfg->locals = prev_locals;
7159 cfg->args = prev_args;
7160 cfg->arg_types = prev_arg_types;
7161 cfg->current_method = prev_current_method;
7162 cfg->generic_context = prev_generic_context;
7163 cfg->ret_var_set = prev_ret_var_set;
7164 cfg->disable_inline = prev_disable_inline;
7165 cfg->inline_depth --;
7167 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7168 if (cfg->verbose_level > 2)
7169 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7171 cfg->stat_inlined_methods++;
7173 /* always add some code to avoid block split failures */
7174 MONO_INST_NEW (cfg, ins, OP_NOP);
7175 MONO_ADD_INS (prev_cbb, ins);
7177 prev_cbb->next_bb = sbblock;
7178 link_bblock (cfg, prev_cbb, sbblock);
7181 * Get rid of the begin and end bblocks if possible to aid local
7184 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7186 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7187 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7189 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7190 MonoBasicBlock *prev = ebblock->in_bb [0];
7192 if (prev->next_bb == ebblock) {
7193 mono_merge_basic_blocks (cfg, prev, ebblock);
7195 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7196 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7197 cfg->cbb = prev_cbb;
7200 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7205 * It's possible that the rvar is set in some prev bblock, but not in others.
7211 for (i = 0; i < ebblock->in_count; ++i) {
7212 bb = ebblock->in_bb [i];
7214 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7217 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7227 * If the inlined method contains only a throw, then the ret var is not
7228 * set, so set it to a dummy value.
7231 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7233 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7236 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: discard the new bblocks and clear any pending error state. */
7239 if (cfg->verbose_level > 2)
7240 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7241 cfg->exception_type = MONO_EXCEPTION_NONE;
7242 mono_loader_clear_error ();
7244 /* This gets rid of the newly added bblocks */
7245 cfg->cbb = prev_cbb;
7247 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7252 * Some of these comments may well be out-of-date.
7253 * Design decisions: we do a single pass over the IL code (and we do bblock
7254 * splitting/merging in the few cases when it's required: a back jump to an IL
7255 * address that was not already seen as bblock starting point).
7256 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7257 * Complex operations are decomposed in simpler ones right away. We need to let the
7258 * arch-specific code peek and poke inside this process somehow (except when the
7259 * optimizations can take advantage of the full semantic info of coarse opcodes).
7260 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7261 * MonoInst->opcode initially is the IL opcode or some simplification of that
7262 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7263 * opcode with value bigger than OP_LAST.
7264 * At this point the IR can be handed over to an interpreter, a dumb code generator
7265 * or to the optimizing code generator that will translate it to SSA form.
7267 * Profiling directed optimizations.
7268 * We may compile by default with few or no optimizations and instrument the code
7269 * or the user may indicate what methods to optimize the most either in a config file
7270 * or through repeated runs where the compiler applies offline the optimizations to
7271 * each method and then decides if it was worth it.
7274 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7275 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7276 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7277 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7278 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7279 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7280 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7281 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7283 /* offset from br.s -> br like opcodes */
7284 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to basic block BB, i.e.
 * no *other* basic block starts at IP (NULL means no block boundary there).
 */
7287 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7289 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7291 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Linearly scan the IL stream [START, END) and create basic blocks (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch/switch.  Also marks the bblock containing a 'throw' as out-of-line
 * so it can be laid out away from the hot path.
 */
7295 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7297 unsigned char *ip = start;
7298 unsigned char *target;
7301 MonoBasicBlock *bblock;
7302 const MonoOpcode *opcode;
7305 cli_addr = ip - start;
7306 i = mono_opcode_value ((const guint8 **)&ip, end);
7309 opcode = &mono_opcodes [i];
/* Advance over the operand; only branch-like operands create bblocks. */
7310 switch (opcode->argument) {
7311 case MonoInlineNone:
7314 case MonoInlineString:
7315 case MonoInlineType:
7316 case MonoInlineField:
7317 case MonoInlineMethod:
7320 case MonoShortInlineR:
7327 case MonoShortInlineVar:
7328 case MonoShortInlineI:
7331 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction. */
7332 target = start + cli_addr + 2 + (signed char)ip [1];
7333 GET_BBLOCK (cfg, bblock, target);
7336 GET_BBLOCK (cfg, bblock, ip);
7338 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction. */
7339 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7340 GET_BBLOCK (cfg, bblock, target);
7343 GET_BBLOCK (cfg, bblock, ip);
7345 case MonoInlineSwitch: {
7346 guint32 n = read32 (ip + 1);
/* Fall-through target is past the opcode, count and the n jump offsets. */
7349 cli_addr += 5 + 4 * n;
7350 target = start + cli_addr;
7351 GET_BBLOCK (cfg, bblock, target);
7353 for (j = 0; j < n; ++j) {
7354 target = start + cli_addr + (gint32)read32 (ip);
7355 GET_BBLOCK (cfg, bblock, target);
7365 g_assert_not_reached ();
7368 if (i == CEE_THROW) {
7369 unsigned char *bb_start = ip - 1;
7371 /* Find the start of the bblock containing the throw */
7373 while ((bb_start >= start) && !bblock) {
7374 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7378 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing open
 * constructed types in the result.  For wrapper methods the token indexes the
 * wrapper's own data and the result is then inflated with CONTEXT; otherwise
 * the token is resolved through the image's metadata.
 */
7388 static inline MonoMethod *
7389 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7393 mono_error_init (error);
7395 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7396 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7398 method = mono_class_inflate_generic_method_checked (method, context, error);
7401 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code
 * an open constructed declaring type is treated as a bad-image error, since
 * only gshared compilation can handle open types.
 */
7407 static inline MonoMethod *
7408 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7411 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7413 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7414 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
/* Without a cfg the error lives in the local MonoError; discard it. */
7418 if (!method && !cfg)
7419 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT, and run class initialization on the result.  Wrapper methods
 * resolve the token through their wrapper data instead of metadata.
 */
7424 static inline MonoClass*
7425 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7430 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7431 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7433 klass = mono_class_inflate_generic_class (klass, context);
7435 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7436 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7439 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD and
 * inflate it with CONTEXT.  Wrapper methods resolve through their wrapper
 * data; otherwise the signature is parsed from the image's metadata.
 */
7443 static inline MonoMethodSignature*
7444 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7446 MonoMethodSignature *fsig;
7448 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7449 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7451 fsig = mono_metadata_parse_signature (method->klass->image, token);
7455 fsig = mono_inflate_generic_signature(fsig, context, &error);
7457 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return SecurityManager.ThrowException (1-arg), looked up once and cached
 * in a function-local static.
 */
7463 throw_exception (void)
7465 static MonoMethod *method = NULL;
7468 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7469 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point.
 */
7476 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7478 MonoMethod *thrower = throw_exception ();
7481 EMIT_NEW_PCONST (cfg, args [0], ex);
7482 mono_emit_method_call (cfg, thrower, args, NULL);
7486 * Return the original method if a wrapper is specified. We can only access
7487 * the custom attributes from the original method.
7490 get_original_method (MonoMethod *method)
7492 if (method->wrapper_type == MONO_WRAPPER_NONE)
7495 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7496 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7499 /* in other cases we need to find the original method */
7500 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the security exception at runtime.
 */
7504 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7506 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7507 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7509 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the security exception at runtime.
 */
7513 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7515 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7516 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7518 emit_throw_exception (cfg, ex);
7522 * Check that the IL instructions at ip are the array initialization
7523 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL pattern
 * following a newarr, and return a pointer to the static field data so the
 * array can be initialized directly (for AOT the field RVA is returned
 * instead and looked up at load time).  OUT_SIZE and OUT_FIELD_TOKEN receive
 * the data size and the ldtoken field token.
 */
7526 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7529 * newarr[System.Int32]
7531 * ldtoken field valuetype ...
7532 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks that the ldtoken token is a Field token (0x04xxxxxx). */
7534 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7536 guint32 token = read32 (ip + 7);
7537 guint32 field_token = read32 (ip + 2);
7538 guint32 field_index = field_token & 0xffffff;
7540 const char *data_ptr;
7542 MonoMethod *cmethod;
7543 MonoClass *dummy_class;
7544 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7548 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7552 *out_field_token = field_token;
7554 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the callee really is corlib's RuntimeHelpers.InitializeArray. */
7557 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7559 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7560 case MONO_TYPE_BOOLEAN:
7564 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7565 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7566 case MONO_TYPE_CHAR:
/* Sanity check: the requested data must fit inside the RVA field. */
7583 if (size > mono_type_size (field->type, &dummy_align))
7586 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7587 if (!image_is_dynamic (method->klass->image)) {
7588 field_index = read32 (ip + 2) & 0xffffff;
7589 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7590 data_ptr = mono_image_rva_map (method->klass->image, rva);
7591 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7592 /* for aot code we do the lookup on load */
7593 if (aot && data_ptr)
7594 return (const char *)GUINT_TO_POINTER (rva);
7596 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7598 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD, including a disassembly of the offending instruction (or a note
 * that the method body is empty).
 */
7606 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7608 char *method_fname = mono_method_full_name (method, TRUE);
7610 MonoMethodHeader *header = mono_method_get_header (method);
7612 if (header->code_size == 0)
7613 method_code = g_strdup ("method body is empty.");
7615 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7616 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7617 g_free (method_fname);
7618 g_free (method_code);
/* Header ownership passes to cfg; it is freed with the other headers later. */
7619 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into IL local N.  When the value is a
 * freshly emitted integer constant and the store would be a plain reg-reg
 * move, the constant's dreg is retargeted to the local instead, saving the move.
 */
7623 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7626 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7627 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7628 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7629 /* Optimize reg-reg moves away */
7631 * Can't optimize other opcodes, since sp[0] might point to
7632 * the last ins of a decomposed opcode.
7634 sp [0]->dreg = (cfg)->locals [n]->dreg;
7636 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7641 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Try to replace a ldloca + following instruction pair with a direct
 * operation on the local, since taking a local's address inhibits many
 * optimizations.  The case visible here handles ldloca followed by initobj
 * (in the same bblock) by initializing the local in place.
 */
7644 static inline unsigned char *
7645 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7655 local = read16 (ip + 2);
7659 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7660 /* From the INITOBJ case */
7661 token = read32 (ip + 2);
7662 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7663 CHECK_TYPELOAD (klass);
7664 type = mini_get_underlying_type (&klass->byval_arg);
7665 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call in llvm-only mode, where vtable
 * and IMT slots hold function descriptors (address + arg pairs) instead
 * of raw code addresses/trampolines. Selects one of four strategies:
 *   1. plain virtual call through a vtable slot, initialized lazily via
 *      the mono_init_vtable_slot icall when found NULL;
 *   2. simple (non-variant, non-generic) interface call through an IMT
 *      thunk which returns the ftndesc to call;
 *   3. generic-virtual / variant-interface call: like (2) but with a
 *      slowpath bblock for slots/instantiations not yet materialized;
 *   4. fully dynamic resolution icalls (gsharedvt and remaining cases).
 * All paths null-check the receiver (sp [0]) first.
 */
7673 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7675 MonoInst *icall_args [16];
7676 MonoInst *call_target, *ins, *vtable_ins;
7677 int arg_reg, this_reg, vtable_reg;
7678 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7679 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7680 gboolean variant_iface = FALSE;
7685 * In llvm-only mode, vtables contain function descriptors instead of
7686 * method addresses/trampolines.
7688 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots, classes use vtable indexes */
7691 slot = mono_method_get_imt_slot (cmethod);
7693 slot = mono_method_get_vtable_index (cmethod);
7695 this_reg = sp [0]->dreg;
7697 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7698 variant_iface = TRUE;
/* Case 1: normal virtual call through the vtable */
7700 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7702 * The simplest case, a normal virtual call.
7704 int slot_reg = alloc_preg (cfg);
7705 int addr_reg = alloc_preg (cfg);
7706 int arg_reg = alloc_preg (cfg);
7707 MonoBasicBlock *non_null_bb;
7709 vtable_reg = alloc_preg (cfg);
7710 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7711 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7713 /* Load the vtable slot, which contains a function descriptor. */
7714 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7716 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (expected, hence MONO_INST_LIKELY) */
7718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7719 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7720 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: initialize the slot through an icall */
7723 // FIXME: Make the wrapper use the preserveall cconv
7724 // FIXME: Use one icall per slot for small slot numbers ?
7725 icall_args [0] = vtable_ins;
7726 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7727 /* Make the icall return the vtable slot value to save some code space */
7728 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7729 ins->dreg = slot_reg;
7730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7733 MONO_START_BB (cfg, non_null_bb);
7734 /* Load the address + arg from the vtable slot */
7735 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7738 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple interface call through an IMT thunk */
7741 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7743 * A simple interface call
7745 * We make a call through an imt slot to obtain the function descriptor we need to call.
7746 * The imt slot contains a function descriptor for a runtime function + arg.
7748 int slot_reg = alloc_preg (cfg);
7749 int addr_reg = alloc_preg (cfg);
7750 int arg_reg = alloc_preg (cfg);
7751 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7753 vtable_reg = alloc_preg (cfg);
7754 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live below the vtable pointer, hence the negative offset */
7755 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7758 * The slot is already initialized when the vtable is created so there is no need
7762 /* Load the imt slot, which contains a function descriptor. */
7763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7765 /* Load the address + arg of the imt thunk from the imt slot */
7766 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7767 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7769 * IMT thunks in llvm-only mode are C functions which take an info argument
7770 * plus the imt method and return the ftndesc to call.
7772 icall_args [0] = thunk_arg_ins;
7773 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7774 cmethod, MONO_RGCTX_INFO_METHOD);
7775 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7777 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual or variant interface call, needs a slowpath */
7780 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7782 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7783 * dynamically extended as more instantiations are discovered.
7784 * This handles generic virtual methods both on classes and interfaces.
7786 int slot_reg = alloc_preg (cfg);
7787 int addr_reg = alloc_preg (cfg);
7788 int arg_reg = alloc_preg (cfg);
7789 int ftndesc_reg = alloc_preg (cfg);
7790 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7791 MonoBasicBlock *slowpath_bb, *end_bb;
7793 NEW_BBLOCK (cfg, slowpath_bb);
7794 NEW_BBLOCK (cfg, end_bb);
7796 vtable_reg = alloc_preg (cfg);
7797 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slot for interfaces, regular vtable slot for classes */
7799 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7801 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7803 /* Load the slot, which contains a function descriptor. */
7804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7806 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7807 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7809 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7812 /* Same as with iface calls */
7813 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7814 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7815 icall_args [0] = thunk_arg_ins;
7816 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7817 cmethod, MONO_RGCTX_INFO_METHOD);
7818 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7819 ftndesc_ins->dreg = ftndesc_reg;
7821 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7822 * they don't know about yet. Fall back to the slowpath in that case.
7824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7825 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7827 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: resolve the target through a runtime icall */
7830 MONO_START_BB (cfg, slowpath_bb);
7831 icall_args [0] = vtable_ins;
7832 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7833 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7834 cmethod, MONO_RGCTX_INFO_METHOD);
7836 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7838 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7839 ftndesc_ins->dreg = ftndesc_reg;
7840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7843 MONO_START_BB (cfg, end_bb);
7844 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: fully dynamic resolution (gsharedvt and remaining cases) */
7848 * Non-optimized cases
7850 icall_args [0] = sp [0];
7851 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7853 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7854 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver writes the extra call argument here */
7856 arg_reg = alloc_preg (cfg);
7857 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7858 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7860 g_assert (is_gsharedvt);
7862 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7864 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7867 * Pass the extra argument even if the callee doesn't receive it, most
7868 * calling conventions allow this.
7870 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Walk KLASS's parent chain testing for System.Exception.
 * (The loop construct and return statements fall outside this excerpt.)
 */
7874 is_exception_class (MonoClass *klass)
7877 if (klass == mono_defaults.exception_class)
7879 klass = klass->parent;
 * is_jit_optimizer_disabled:
 *
 * Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is computed once per
 * assembly and cached on it (jit_optimizer_disabled /
 * jit_optimizer_disabled_inited), with memory barriers publishing the
 * value before the inited flag.
7891 is_jit_optimizer_disabled (MonoMethod *m)
7893 MonoAssembly *ass = m->klass->image->assembly;
7894 MonoCustomAttrInfo* attrs;
/* Cached DebuggableAttribute class lookup, shared across calls */
7895 static MonoClass *klass;
7897 gboolean val = FALSE;
/* Fast path: result already cached on the assembly */
7900 if (ass->jit_optimizer_disabled_inited)
7901 return ass->jit_optimizer_disabled;
7904 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the (default FALSE) result before setting the inited flag */
7907 ass->jit_optimizer_disabled = FALSE;
7908 mono_memory_barrier ();
7909 ass->jit_optimizer_disabled_inited = TRUE;
7913 attrs = mono_custom_attrs_from_assembly (ass);
7915 for (i = 0; i < attrs->num_attrs; ++i) {
7916 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7918 MonoMethodSignature *sig;
/* Only DebuggableAttribute instances are of interest */
7920 if (!attr->ctor || attr->ctor->klass != klass)
7922 /* Decode the attribute. See reflection.c */
7923 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7924 g_assert (read16 (p) == 0x0001);
7927 // FIXME: Support named parameters
7928 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here */
7929 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7931 /* Two boolean arguments */
7935 mono_custom_attrs_free (attrs);
/* Publish the decoded result, again barrier-ordered before the flag */
7938 ass->jit_optimizer_disabled = val;
7939 mono_memory_barrier ();
7940 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD made with CALL_OPCODE
 * can actually be compiled as a tail call. Starts from the
 * architecture-specific check, then vetoes cases where the callee could
 * observe the caller's dead stack frame (byref/pointer/fnptr args,
 * valuetype "this"), pinvokes, LMF-saving callers, most wrappers, and
 * any opcode other than CEE_CALL.
 */
7946 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7948 gboolean supported_tail_call;
7951 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7953 for (i = 0; i < fsig->param_count; ++i) {
7954 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7955 /* These can point to the current method's stack */
7956 supported_tail_call = FALSE;
7958 if (fsig->hasthis && cmethod->klass->valuetype)
7959 /* this might point to the current method's stack */
7960 supported_tail_call = FALSE;
7961 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7962 supported_tail_call = FALSE;
7963 if (cfg->method->save_lmf)
7964 supported_tail_call = FALSE;
7965 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7966 supported_tail_call = FALSE;
7967 if (call_opcode != CEE_CALL)
7968 supported_tail_call = FALSE;
7970 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call emission */
7972 if (supported_tail_call) {
7973 if (!mono_debug_count ())
7974 supported_tail_call = FALSE;
7978 return supported_tail_call;
 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 *   SP holds the ctor arguments (receiver first). Depending on generic
 * sharing, inlining options and gsharedvt, the ctor is emitted as an
 * intrinsic, inlined, called indirectly (calli with an rgctx-obtained
 * address), or called directly; a vtable/mrgctx argument is materialized
 * when generic sharing requires one.
 */
7987 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7988 MonoInst **sp, guint8 *ip, int *inline_costs)
7990 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument */
7992 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7993 mono_method_is_generic_sharable (cmethod, TRUE)) {
7994 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7995 mono_class_vtable (cfg->domain, cmethod->klass);
7996 CHECK_TYPELOAD (cmethod->klass);
7998 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7999 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8002 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8003 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8005 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8007 CHECK_TYPELOAD (cmethod->klass);
8008 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8013 /* Avoid virtual calls to ctors if possible */
8014 if (mono_class_is_marshalbyref (cmethod->klass))
8015 callvirt_this_arg = sp [0];
/* 1) Try to replace the ctor with an intrinsic */
8017 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8018 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8019 CHECK_CFG_EXCEPTION;
/* 2) Try to inline it (exception subclasses excluded) */
8020 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8021 mono_method_check_inlining (cfg, cmethod) &&
8022 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8025 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8026 cfg->real_offset += 5;
8028 *inline_costs += costs - 5;
8030 INLINE_FAILURE ("inline failure");
8031 // FIXME-VT: Clean this up
8032 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8033 GSHAREDVT_FAILURE(*ip);
8034 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: call through the out trampoline */
8036 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8039 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8041 if (cfg->llvm_only) {
8042 // FIXME: Avoid initializing vtable_arg
8043 emit_llvmonly_calli (cfg, fsig, sp, addr);
8045 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Unsharable generic context: indirect call via rgctx address */
8047 } else if (context_used &&
8048 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8049 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8050 MonoInst *cmethod_addr;
8052 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8054 if (cfg->llvm_only) {
8055 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8056 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8057 emit_llvmonly_calli (cfg, fsig, sp, addr);
8059 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8060 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8062 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Fallback: plain direct call */
8065 INLINE_FAILURE ("ctor call");
8066 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8067 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR storing VAL as the return value of the current method.
 * Valuetype returns (mini_type_to_stind () == CEE_STOBJ) are stored
 * either into cfg->ret directly or through the vret address; on
 * soft-float targets an R4 return is first converted via the
 * mono_fload_r4_arg icall before the arch-specific setret.
 */
8074 emit_setret (MonoCompile *cfg, MonoInst *val)
8076 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: store through the return variable / vret address */
8079 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8082 if (!cfg->vret_addr) {
8083 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
8085 EMIT_NEW_RETLOADA (cfg, ret_addr);
8087 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8088 ins->klass = mono_class_from_mono_type (ret_type);
8091 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall first */
8092 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8093 MonoInst *iargs [1];
8097 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8098 mono_arch_emit_setret (cfg, cfg->method, conv);
8100 mono_arch_emit_setret (cfg, cfg->method, val);
8103 mono_arch_emit_setret (cfg, cfg->method, val);
8109 * mono_method_to_ir:
8111 * Translate the .net IL into linear IR.
8114 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8115 MonoInst *return_var, MonoInst **inline_args,
8116 guint inline_offset, gboolean is_virtual_call)
8119 MonoInst *ins, **sp, **stack_start;
8120 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8121 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8122 MonoMethod *cmethod, *method_definition;
8123 MonoInst **arg_array;
8124 MonoMethodHeader *header;
8126 guint32 token, ins_flag;
8128 MonoClass *constrained_class = NULL;
8129 unsigned char *ip, *end, *target, *err_pos;
8130 MonoMethodSignature *sig;
8131 MonoGenericContext *generic_context = NULL;
8132 MonoGenericContainer *generic_container = NULL;
8133 MonoType **param_types;
8134 int i, n, start_new_bblock, dreg;
8135 int num_calls = 0, inline_costs = 0;
8136 int breakpoint_id = 0;
8138 GSList *class_inits = NULL;
8139 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8141 gboolean init_locals, seq_points, skip_dead_blocks;
8142 gboolean sym_seq_points = FALSE;
8143 MonoDebugMethodInfo *minfo;
8144 MonoBitSet *seq_point_locs = NULL;
8145 MonoBitSet *seq_point_set_locs = NULL;
8147 cfg->disable_inline = is_jit_optimizer_disabled (method);
8149 /* serialization and xdomain stuff may need access to private fields and methods */
8150 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8151 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8152 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8153 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8154 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8155 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8157 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8158 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8159 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8160 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8161 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8163 image = method->klass->image;
8164 header = mono_method_get_header (method);
8166 if (mono_loader_get_last_error ()) {
8167 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8168 mono_error_set_from_loader_error (&cfg->error);
8170 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8172 goto exception_exit;
8174 generic_container = mono_method_get_generic_container (method);
8175 sig = mono_method_signature (method);
8176 num_args = sig->hasthis + sig->param_count;
8177 ip = (unsigned char*)header->code;
8178 cfg->cil_start = ip;
8179 end = ip + header->code_size;
8180 cfg->stat_cil_code_size += header->code_size;
8182 seq_points = cfg->gen_seq_points && cfg->method == method;
8184 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8185 /* We could hit a seq point before attaching to the JIT (#8338) */
8189 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8190 minfo = mono_debug_lookup_method (method);
8192 MonoSymSeqPoint *sps;
8193 int i, n_il_offsets;
8195 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8196 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8197 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8198 sym_seq_points = TRUE;
8199 for (i = 0; i < n_il_offsets; ++i) {
8200 if (sps [i].il_offset < header->code_size)
8201 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8204 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8205 /* Methods without line number info like auto-generated property accessors */
8206 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8207 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8208 sym_seq_points = TRUE;
8213 * Methods without init_locals set could cause asserts in various passes
8214 * (#497220). To work around this, we emit dummy initialization opcodes
8215 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8216 * on some platforms.
8218 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8219 init_locals = header->init_locals;
8223 method_definition = method;
8224 while (method_definition->is_inflated) {
8225 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8226 method_definition = imethod->declaring;
8229 /* SkipVerification is not allowed if core-clr is enabled */
8230 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8232 dont_verify_stloc = TRUE;
8235 if (sig->is_inflated)
8236 generic_context = mono_method_get_context (method);
8237 else if (generic_container)
8238 generic_context = &generic_container->context;
8239 cfg->generic_context = generic_context;
8242 g_assert (!sig->has_type_parameters);
8244 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8245 g_assert (method->is_inflated);
8246 g_assert (mono_method_get_context (method)->method_inst);
8248 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8249 g_assert (sig->generic_param_count);
8251 if (cfg->method == method) {
8252 cfg->real_offset = 0;
8254 cfg->real_offset = inline_offset;
8257 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8258 cfg->cil_offset_to_bb_len = header->code_size;
8260 cfg->current_method = method;
8262 if (cfg->verbose_level > 2)
8263 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8265 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8267 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8268 for (n = 0; n < sig->param_count; ++n)
8269 param_types [n + sig->hasthis] = sig->params [n];
8270 cfg->arg_types = param_types;
8272 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8273 if (cfg->method == method) {
8275 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8276 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8279 NEW_BBLOCK (cfg, start_bblock);
8280 cfg->bb_entry = start_bblock;
8281 start_bblock->cil_code = NULL;
8282 start_bblock->cil_length = 0;
8285 NEW_BBLOCK (cfg, end_bblock);
8286 cfg->bb_exit = end_bblock;
8287 end_bblock->cil_code = NULL;
8288 end_bblock->cil_length = 0;
8289 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8290 g_assert (cfg->num_bblocks == 2);
8292 arg_array = cfg->args;
8294 if (header->num_clauses) {
8295 cfg->spvars = g_hash_table_new (NULL, NULL);
8296 cfg->exvars = g_hash_table_new (NULL, NULL);
8298 /* handle exception clauses */
8299 for (i = 0; i < header->num_clauses; ++i) {
8300 MonoBasicBlock *try_bb;
8301 MonoExceptionClause *clause = &header->clauses [i];
8302 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8304 try_bb->real_offset = clause->try_offset;
8305 try_bb->try_start = TRUE;
8306 try_bb->region = ((i + 1) << 8) | clause->flags;
8307 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8308 tblock->real_offset = clause->handler_offset;
8309 tblock->flags |= BB_EXCEPTION_HANDLER;
8312 * Linking the try block with the EH block hinders inlining as we won't be able to
8313 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8315 if (COMPILE_LLVM (cfg))
8316 link_bblock (cfg, try_bb, tblock);
8318 if (*(ip + clause->handler_offset) == CEE_POP)
8319 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8321 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8322 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8323 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8324 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8325 MONO_ADD_INS (tblock, ins);
8327 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8328 /* finally clauses already have a seq point */
8329 /* seq points for filter clauses are emitted below */
8330 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8331 MONO_ADD_INS (tblock, ins);
8334 /* todo: is a fault block unsafe to optimize? */
8335 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8336 tblock->flags |= BB_EXCEPTION_UNSAFE;
8339 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8341 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8343 /* catch and filter blocks get the exception object on the stack */
8344 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8345 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8347 /* mostly like handle_stack_args (), but just sets the input args */
8348 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8349 tblock->in_scount = 1;
8350 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8351 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8355 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8356 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8357 if (!cfg->compile_llvm) {
8358 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8359 ins->dreg = tblock->in_stack [0]->dreg;
8360 MONO_ADD_INS (tblock, ins);
8363 MonoInst *dummy_use;
8366 * Add a dummy use for the exvar so its liveness info will be
8369 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8372 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8373 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8374 MONO_ADD_INS (tblock, ins);
8377 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8378 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8379 tblock->flags |= BB_EXCEPTION_HANDLER;
8380 tblock->real_offset = clause->data.filter_offset;
8381 tblock->in_scount = 1;
8382 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8383 /* The filter block shares the exvar with the handler block */
8384 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8385 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8386 MONO_ADD_INS (tblock, ins);
8390 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8391 clause->data.catch_class &&
8393 mono_class_check_context_used (clause->data.catch_class)) {
8395 * In shared generic code with catch
8396 * clauses containing type variables
8397 * the exception handling code has to
8398 * be able to get to the rgctx.
8399 * Therefore we have to make sure that
8400 * the vtable/mrgctx argument (for
8401 * static or generic methods) or the
8402 * "this" argument (for non-static
8403 * methods) are live.
8405 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8406 mini_method_get_context (method)->method_inst ||
8407 method->klass->valuetype) {
8408 mono_get_vtable_var (cfg);
8410 MonoInst *dummy_use;
8412 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8417 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8418 cfg->cbb = start_bblock;
8419 cfg->args = arg_array;
8420 mono_save_args (cfg, sig, inline_args);
8423 /* FIRST CODE BLOCK */
8424 NEW_BBLOCK (cfg, tblock);
8425 tblock->cil_code = ip;
8429 ADD_BBLOCK (cfg, tblock);
8431 if (cfg->method == method) {
8432 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8433 if (breakpoint_id) {
8434 MONO_INST_NEW (cfg, ins, OP_BREAK);
8435 MONO_ADD_INS (cfg->cbb, ins);
8439 /* we use a separate basic block for the initialization code */
8440 NEW_BBLOCK (cfg, init_localsbb);
8441 cfg->bb_init = init_localsbb;
8442 init_localsbb->real_offset = cfg->real_offset;
8443 start_bblock->next_bb = init_localsbb;
8444 init_localsbb->next_bb = cfg->cbb;
8445 link_bblock (cfg, start_bblock, init_localsbb);
8446 link_bblock (cfg, init_localsbb, cfg->cbb);
8448 cfg->cbb = init_localsbb;
8450 if (cfg->gsharedvt && cfg->method == method) {
8451 MonoGSharedVtMethodInfo *info;
8452 MonoInst *var, *locals_var;
8455 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8456 info->method = cfg->method;
8457 info->count_entries = 16;
8458 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8459 cfg->gsharedvt_info = info;
8461 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8462 /* prevent it from being register allocated */
8463 //var->flags |= MONO_INST_VOLATILE;
8464 cfg->gsharedvt_info_var = var;
8466 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8467 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8469 /* Allocate locals */
8470 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8471 /* prevent it from being register allocated */
8472 //locals_var->flags |= MONO_INST_VOLATILE;
8473 cfg->gsharedvt_locals_var = locals_var;
8475 dreg = alloc_ireg (cfg);
8476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8478 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8479 ins->dreg = locals_var->dreg;
8481 MONO_ADD_INS (cfg->cbb, ins);
8482 cfg->gsharedvt_locals_var_ins = ins;
8484 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8487 ins->flags |= MONO_INST_INIT;
8491 if (mono_security_core_clr_enabled ()) {
8492 /* check if this is native code, e.g. an icall or a p/invoke */
8493 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8494 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8496 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8497 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8499 /* if this ia a native call then it can only be JITted from platform code */
8500 if ((icall || pinvk) && method->klass && method->klass->image) {
8501 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8502 MonoException *ex = icall ? mono_get_exception_security () :
8503 mono_get_exception_method_access ();
8504 emit_throw_exception (cfg, ex);
8511 CHECK_CFG_EXCEPTION;
8513 if (header->code_size == 0)
8516 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8521 if (cfg->method == method)
8522 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8524 for (n = 0; n < header->num_locals; ++n) {
8525 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8530 /* We force the vtable variable here for all shared methods
8531 for the possibility that they might show up in a stack
8532 trace where their exact instantiation is needed. */
8533 if (cfg->gshared && method == cfg->method) {
8534 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8535 mini_method_get_context (method)->method_inst ||
8536 method->klass->valuetype) {
8537 mono_get_vtable_var (cfg);
8539 /* FIXME: Is there a better way to do this?
8540 We need the variable live for the duration
8541 of the whole method. */
8542 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8546 /* add a check for this != NULL to inlined methods */
8547 if (is_virtual_call) {
8550 NEW_ARGLOAD (cfg, arg_ins, 0);
8551 MONO_ADD_INS (cfg->cbb, arg_ins);
8552 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8555 skip_dead_blocks = !dont_verify;
8556 if (skip_dead_blocks) {
8557 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8562 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8563 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8566 start_new_bblock = 0;
8568 if (cfg->method == method)
8569 cfg->real_offset = ip - header->code;
8571 cfg->real_offset = inline_offset;
8576 if (start_new_bblock) {
8577 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8578 if (start_new_bblock == 2) {
8579 g_assert (ip == tblock->cil_code);
8581 GET_BBLOCK (cfg, tblock, ip);
8583 cfg->cbb->next_bb = tblock;
8585 start_new_bblock = 0;
8586 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8587 if (cfg->verbose_level > 3)
8588 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8589 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8593 g_slist_free (class_inits);
8596 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8597 link_bblock (cfg, cfg->cbb, tblock);
8598 if (sp != stack_start) {
8599 handle_stack_args (cfg, stack_start, sp - stack_start);
8601 CHECK_UNVERIFIABLE (cfg);
8603 cfg->cbb->next_bb = tblock;
8605 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8606 if (cfg->verbose_level > 3)
8607 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8608 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8611 g_slist_free (class_inits);
8616 if (skip_dead_blocks) {
8617 int ip_offset = ip - header->code;
8619 if (ip_offset == bb->end)
8623 int op_size = mono_opcode_size (ip, end);
8624 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8626 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8628 if (ip_offset + op_size == bb->end) {
8629 MONO_INST_NEW (cfg, ins, OP_NOP);
8630 MONO_ADD_INS (cfg->cbb, ins);
8631 start_new_bblock = 1;
8639 * Sequence points are points where the debugger can place a breakpoint.
8640 * Currently, we generate these automatically at points where the IL
8643 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8645 * Make methods interruptable at the beginning, and at the targets of
8646 * backward branches.
8647 * Also, do this at the start of every bblock in methods with clauses too,
8648 * to be able to handle instructions with inprecise control flow like
8650 * Backward branches are handled at the end of method-to-ir ().
8652 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8653 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8655 /* Avoid sequence points on empty IL like .volatile */
8656 // FIXME: Enable this
8657 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8658 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8659 if ((sp != stack_start) && !sym_seq_point)
8660 ins->flags |= MONO_INST_NONEMPTY_STACK;
8661 MONO_ADD_INS (cfg->cbb, ins);
8664 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8667 cfg->cbb->real_offset = cfg->real_offset;
8669 if ((cfg->method == method) && cfg->coverage_info) {
8670 guint32 cil_offset = ip - header->code;
8671 cfg->coverage_info->data [cil_offset].cil_code = ip;
8673 /* TODO: Use an increment here */
8674 #if defined(TARGET_X86)
8675 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8676 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8678 MONO_ADD_INS (cfg->cbb, ins);
8680 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8681 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8685 if (cfg->verbose_level > 3)
8686 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8690 if (seq_points && !sym_seq_points && sp != stack_start) {
8692 * The C# compiler uses these nops to notify the JIT that it should
8693 * insert seq points.
8695 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8696 MONO_ADD_INS (cfg->cbb, ins);
8698 if (cfg->keep_cil_nops)
8699 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8701 MONO_INST_NEW (cfg, ins, OP_NOP);
8703 MONO_ADD_INS (cfg->cbb, ins);
8706 if (should_insert_brekpoint (cfg->method)) {
8707 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8709 MONO_INST_NEW (cfg, ins, OP_NOP);
8712 MONO_ADD_INS (cfg->cbb, ins);
8718 CHECK_STACK_OVF (1);
8719 n = (*ip)-CEE_LDARG_0;
8721 EMIT_NEW_ARGLOAD (cfg, ins, n);
8729 CHECK_STACK_OVF (1);
8730 n = (*ip)-CEE_LDLOC_0;
8732 EMIT_NEW_LOCLOAD (cfg, ins, n);
8741 n = (*ip)-CEE_STLOC_0;
8744 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8746 emit_stloc_ir (cfg, sp, header, n);
8753 CHECK_STACK_OVF (1);
8756 EMIT_NEW_ARGLOAD (cfg, ins, n);
8762 CHECK_STACK_OVF (1);
8765 NEW_ARGLOADA (cfg, ins, n);
8766 MONO_ADD_INS (cfg->cbb, ins);
8776 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8778 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8783 CHECK_STACK_OVF (1);
8786 EMIT_NEW_LOCLOAD (cfg, ins, n);
8790 case CEE_LDLOCA_S: {
8791 unsigned char *tmp_ip;
8793 CHECK_STACK_OVF (1);
8794 CHECK_LOCAL (ip [1]);
8796 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8802 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8811 CHECK_LOCAL (ip [1]);
8812 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8814 emit_stloc_ir (cfg, sp, header, ip [1]);
8819 CHECK_STACK_OVF (1);
8820 EMIT_NEW_PCONST (cfg, ins, NULL);
8821 ins->type = STACK_OBJ;
8826 CHECK_STACK_OVF (1);
8827 EMIT_NEW_ICONST (cfg, ins, -1);
8840 CHECK_STACK_OVF (1);
8841 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8847 CHECK_STACK_OVF (1);
8849 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8855 CHECK_STACK_OVF (1);
8856 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8862 CHECK_STACK_OVF (1);
8863 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8864 ins->type = STACK_I8;
8865 ins->dreg = alloc_dreg (cfg, STACK_I8);
8867 ins->inst_l = (gint64)read64 (ip);
8868 MONO_ADD_INS (cfg->cbb, ins);
8874 gboolean use_aotconst = FALSE;
8876 #ifdef TARGET_POWERPC
8877 /* FIXME: Clean this up */
8878 if (cfg->compile_aot)
8879 use_aotconst = TRUE;
8882 /* FIXME: we should really allocate this only late in the compilation process */
8883 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8885 CHECK_STACK_OVF (1);
8891 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8893 dreg = alloc_freg (cfg);
8894 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8895 ins->type = cfg->r4_stack_type;
8897 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8898 ins->type = cfg->r4_stack_type;
8899 ins->dreg = alloc_dreg (cfg, STACK_R8);
8901 MONO_ADD_INS (cfg->cbb, ins);
8911 gboolean use_aotconst = FALSE;
8913 #ifdef TARGET_POWERPC
8914 /* FIXME: Clean this up */
8915 if (cfg->compile_aot)
8916 use_aotconst = TRUE;
8919 /* FIXME: we should really allocate this only late in the compilation process */
8920 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8922 CHECK_STACK_OVF (1);
8928 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8930 dreg = alloc_freg (cfg);
8931 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8932 ins->type = STACK_R8;
8934 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8935 ins->type = STACK_R8;
8936 ins->dreg = alloc_dreg (cfg, STACK_R8);
8938 MONO_ADD_INS (cfg->cbb, ins);
8947 MonoInst *temp, *store;
8949 CHECK_STACK_OVF (1);
8953 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8954 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8956 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8959 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8972 if (sp [0]->type == STACK_R8)
8973 /* we need to pop the value from the x86 FP stack */
8974 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8979 MonoMethodSignature *fsig;
8982 INLINE_FAILURE ("jmp");
8983 GSHAREDVT_FAILURE (*ip);
8986 if (stack_start != sp)
8988 token = read32 (ip + 1);
8989 /* FIXME: check the signature matches */
8990 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8993 if (cfg->gshared && mono_method_check_context_used (cmethod))
8994 GENERIC_SHARING_FAILURE (CEE_JMP);
8996 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8998 fsig = mono_method_signature (cmethod);
8999 n = fsig->param_count + fsig->hasthis;
9000 if (cfg->llvm_only) {
9003 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9004 for (i = 0; i < n; ++i)
9005 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9006 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9008 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9009 * have to emit a normal return since llvm expects it.
9012 emit_setret (cfg, ins);
9013 MONO_INST_NEW (cfg, ins, OP_BR);
9014 ins->inst_target_bb = end_bblock;
9015 MONO_ADD_INS (cfg->cbb, ins);
9016 link_bblock (cfg, cfg->cbb, end_bblock);
9019 } else if (cfg->backend->have_op_tail_call) {
9020 /* Handle tail calls similarly to calls */
9023 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9024 call->method = cmethod;
9025 call->tail_call = TRUE;
9026 call->signature = mono_method_signature (cmethod);
9027 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9028 call->inst.inst_p0 = cmethod;
9029 for (i = 0; i < n; ++i)
9030 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9032 mono_arch_emit_call (cfg, call);
9033 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9034 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9036 for (i = 0; i < num_args; ++i)
9037 /* Prevent arguments from being optimized away */
9038 arg_array [i]->flags |= MONO_INST_VOLATILE;
9040 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9041 ins = (MonoInst*)call;
9042 ins->inst_p0 = cmethod;
9043 MONO_ADD_INS (cfg->cbb, ins);
9047 start_new_bblock = 1;
9052 MonoMethodSignature *fsig;
9055 token = read32 (ip + 1);
9059 //GSHAREDVT_FAILURE (*ip);
9064 fsig = mini_get_signature (method, token, generic_context);
9066 if (method->dynamic && fsig->pinvoke) {
9070 * This is a call through a function pointer using a pinvoke
9071 * signature. Have to create a wrapper and call that instead.
9072 * FIXME: This is very slow, need to create a wrapper at JIT time
9073 * instead based on the signature.
9075 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9076 EMIT_NEW_PCONST (cfg, args [1], fsig);
9078 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9081 n = fsig->param_count + fsig->hasthis;
9085 //g_assert (!virtual_ || fsig->hasthis);
9089 inline_costs += 10 * num_calls++;
9092 * Making generic calls out of gsharedvt methods.
9093 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9094 * patching gshared method addresses into a gsharedvt method.
9096 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9098 * We pass the address to the gsharedvt trampoline in the rgctx reg
9100 MonoInst *callee = addr;
9102 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9104 GSHAREDVT_FAILURE (*ip);
9108 GSHAREDVT_FAILURE (*ip);
9110 addr = emit_get_rgctx_sig (cfg, context_used,
9111 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9112 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9116 /* Prevent inlining of methods with indirect calls */
9117 INLINE_FAILURE ("indirect call");
9119 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9120 MonoJumpInfoType info_type;
9124 * Instead of emitting an indirect call, emit a direct call
9125 * with the contents of the aotconst as the patch info.
9127 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9128 info_type = (MonoJumpInfoType)addr->inst_c1;
9129 info_data = addr->inst_p0;
9131 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9132 info_data = addr->inst_right->inst_left;
9135 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9136 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9141 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9145 /* End of call, INS should contain the result of the call, if any */
9147 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9149 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9152 CHECK_CFG_EXCEPTION;
9156 constrained_class = NULL;
9160 case CEE_CALLVIRT: {
9161 MonoInst *addr = NULL;
9162 MonoMethodSignature *fsig = NULL;
9164 int virtual_ = *ip == CEE_CALLVIRT;
9165 gboolean pass_imt_from_rgctx = FALSE;
9166 MonoInst *imt_arg = NULL;
9167 MonoInst *keep_this_alive = NULL;
9168 gboolean pass_vtable = FALSE;
9169 gboolean pass_mrgctx = FALSE;
9170 MonoInst *vtable_arg = NULL;
9171 gboolean check_this = FALSE;
9172 gboolean supported_tail_call = FALSE;
9173 gboolean tail_call = FALSE;
9174 gboolean need_seq_point = FALSE;
9175 guint32 call_opcode = *ip;
9176 gboolean emit_widen = TRUE;
9177 gboolean push_res = TRUE;
9178 gboolean skip_ret = FALSE;
9179 gboolean delegate_invoke = FALSE;
9180 gboolean direct_icall = FALSE;
9181 gboolean constrained_partial_call = FALSE;
9182 MonoMethod *cil_method;
9185 token = read32 (ip + 1);
9189 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9192 cil_method = cmethod;
9194 if (constrained_class) {
9195 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9196 if (!mini_is_gsharedvt_klass (constrained_class)) {
9197 g_assert (!cmethod->klass->valuetype);
9198 if (!mini_type_is_reference (&constrained_class->byval_arg))
9199 constrained_partial_call = TRUE;
9203 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9204 if (cfg->verbose_level > 2)
9205 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9206 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9207 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9209 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9213 if (cfg->verbose_level > 2)
9214 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9216 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9218 * This is needed since get_method_constrained can't find
9219 * the method in klass representing a type var.
9220 * The type var is guaranteed to be a reference type in this
9223 if (!mini_is_gsharedvt_klass (constrained_class))
9224 g_assert (!cmethod->klass->valuetype);
9226 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9232 if (!cmethod || mono_loader_get_last_error ()) {
9233 if (mono_loader_get_last_error ()) {
9234 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9235 mono_error_set_from_loader_error (&cfg->error);
9241 if (!dont_verify && !cfg->skip_visibility) {
9242 MonoMethod *target_method = cil_method;
9243 if (method->is_inflated) {
9244 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9247 if (!mono_method_can_access_method (method_definition, target_method) &&
9248 !mono_method_can_access_method (method, cil_method))
9249 METHOD_ACCESS_FAILURE (method, cil_method);
9252 if (mono_security_core_clr_enabled ())
9253 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9255 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9256 /* MS.NET seems to silently convert this to a callvirt */
9261 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9262 * converts to a callvirt.
9264 * tests/bug-515884.il is an example of this behavior
9266 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9267 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9268 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9272 if (!cmethod->klass->inited)
9273 if (!mono_class_init (cmethod->klass))
9274 TYPE_LOAD_ERROR (cmethod->klass);
9276 fsig = mono_method_signature (cmethod);
9279 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9280 mini_class_is_system_array (cmethod->klass)) {
9281 array_rank = cmethod->klass->rank;
9282 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9283 direct_icall = TRUE;
9284 } else if (fsig->pinvoke) {
9285 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9286 fsig = mono_method_signature (wrapper);
9287 } else if (constrained_class) {
9289 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9293 if (cfg->llvm_only && !cfg->method->wrapper_type)
9294 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9296 /* See code below */
9297 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9298 MonoBasicBlock *tbb;
9300 GET_BBLOCK (cfg, tbb, ip + 5);
9301 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9303 * We want to extend the try block to cover the call, but we can't do it if the
9304 * call is made directly since its followed by an exception check.
9306 direct_icall = FALSE;
9310 mono_save_token_info (cfg, image, token, cil_method);
9312 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9313 need_seq_point = TRUE;
9315 /* Don't support calls made using type arguments for now */
9317 if (cfg->gsharedvt) {
9318 if (mini_is_gsharedvt_signature (fsig))
9319 GSHAREDVT_FAILURE (*ip);
9323 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9324 g_assert_not_reached ();
9326 n = fsig->param_count + fsig->hasthis;
9328 if (!cfg->gshared && cmethod->klass->generic_container)
9332 g_assert (!mono_method_check_context_used (cmethod));
9336 //g_assert (!virtual_ || fsig->hasthis);
9341 * We have the `constrained.' prefix opcode.
9343 if (constrained_class) {
9344 if (mini_is_gsharedvt_klass (constrained_class)) {
9345 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9346 /* The 'Own method' case below */
9347 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9348 /* 'The type parameter is instantiated as a reference type' case below. */
9350 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9351 CHECK_CFG_EXCEPTION;
9357 if (constrained_partial_call) {
9358 gboolean need_box = TRUE;
9361 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9362 * called method is not known at compile time either. The called method could end up being
9363 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9364 * to box the receiver.
9365 * A simple solution would be to box always and make a normal virtual call, but that would
9366 * be bad performance wise.
9368 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9370 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9375 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9376 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9377 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9378 ins->klass = constrained_class;
9379 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9380 CHECK_CFG_EXCEPTION;
9381 } else if (need_box) {
9383 MonoBasicBlock *is_ref_bb, *end_bb;
9384 MonoInst *nonbox_call;
9387 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9389 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9390 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9392 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9394 NEW_BBLOCK (cfg, is_ref_bb);
9395 NEW_BBLOCK (cfg, end_bb);
9397 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9402 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9404 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9407 MONO_START_BB (cfg, is_ref_bb);
9408 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9409 ins->klass = constrained_class;
9410 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9411 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9413 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9415 MONO_START_BB (cfg, end_bb);
9418 nonbox_call->dreg = ins->dreg;
9421 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9422 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9423 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9426 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9428 * The type parameter is instantiated as a valuetype,
9429 * but that type doesn't override the method we're
9430 * calling, so we need to box `this'.
9432 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9433 ins->klass = constrained_class;
9434 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9435 CHECK_CFG_EXCEPTION;
9436 } else if (!constrained_class->valuetype) {
9437 int dreg = alloc_ireg_ref (cfg);
9440 * The type parameter is instantiated as a reference
9441 * type. We have a managed pointer on the stack, so
9442 * we need to dereference it here.
9444 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9445 ins->type = STACK_OBJ;
9448 if (cmethod->klass->valuetype) {
9451 /* Interface method */
9454 mono_class_setup_vtable (constrained_class);
9455 CHECK_TYPELOAD (constrained_class);
9456 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9458 TYPE_LOAD_ERROR (constrained_class);
9459 slot = mono_method_get_vtable_slot (cmethod);
9461 TYPE_LOAD_ERROR (cmethod->klass);
9462 cmethod = constrained_class->vtable [ioffset + slot];
9464 if (cmethod->klass == mono_defaults.enum_class) {
9465 /* Enum implements some interfaces, so treat this as the first case */
9466 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9467 ins->klass = constrained_class;
9468 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9469 CHECK_CFG_EXCEPTION;
9474 constrained_class = NULL;
9477 if (check_call_signature (cfg, fsig, sp))
9480 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9481 delegate_invoke = TRUE;
9483 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9484 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9485 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9493 * If the callee is a shared method, then its static cctor
9494 * might not get called after the call was patched.
9496 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9497 emit_class_init (cfg, cmethod->klass);
9498 CHECK_TYPELOAD (cmethod->klass);
9501 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9504 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9506 context_used = mini_method_check_context_used (cfg, cmethod);
9508 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9509 /* Generic method interface
9510 calls are resolved via a
9511 helper function and don't
9513 if (!cmethod_context || !cmethod_context->method_inst)
9514 pass_imt_from_rgctx = TRUE;
9518 * If a shared method calls another
9519 * shared method then the caller must
9520 * have a generic sharing context
9521 * because the magic trampoline
9522 * requires it. FIXME: We shouldn't
9523 * have to force the vtable/mrgctx
9524 * variable here. Instead there
9525 * should be a flag in the cfg to
9526 * request a generic sharing context.
9529 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9530 mono_get_vtable_var (cfg);
9535 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9537 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9539 CHECK_TYPELOAD (cmethod->klass);
9540 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9545 g_assert (!vtable_arg);
9547 if (!cfg->compile_aot) {
9549 * emit_get_rgctx_method () calls mono_class_vtable () so check
9550 * for type load errors before.
9552 mono_class_setup_vtable (cmethod->klass);
9553 CHECK_TYPELOAD (cmethod->klass);
9556 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9558 /* !marshalbyref is needed to properly handle generic methods + remoting */
9559 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9560 MONO_METHOD_IS_FINAL (cmethod)) &&
9561 !mono_class_is_marshalbyref (cmethod->klass)) {
9568 if (pass_imt_from_rgctx) {
9569 g_assert (!pass_vtable);
9571 imt_arg = emit_get_rgctx_method (cfg, context_used,
9572 cmethod, MONO_RGCTX_INFO_METHOD);
9576 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9578 /* Calling virtual generic methods */
9579 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9580 !(MONO_METHOD_IS_FINAL (cmethod) &&
9581 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9582 fsig->generic_param_count &&
9583 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9585 MonoInst *this_temp, *this_arg_temp, *store;
9586 MonoInst *iargs [4];
9588 g_assert (fsig->is_inflated);
9590 /* Prevent inlining of methods that contain indirect calls */
9591 INLINE_FAILURE ("virtual generic call");
9593 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9594 GSHAREDVT_FAILURE (*ip);
9596 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9597 g_assert (!imt_arg);
9599 g_assert (cmethod->is_inflated);
9600 imt_arg = emit_get_rgctx_method (cfg, context_used,
9601 cmethod, MONO_RGCTX_INFO_METHOD);
9602 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9604 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9605 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9606 MONO_ADD_INS (cfg->cbb, store);
9608 /* FIXME: This should be a managed pointer */
9609 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9611 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9612 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9613 cmethod, MONO_RGCTX_INFO_METHOD);
9614 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9615 addr = mono_emit_jit_icall (cfg,
9616 mono_helper_compile_generic_method, iargs);
9618 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9620 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9627 * Implement a workaround for the inherent races involved in locking:
9633 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9634 * try block, the Exit () won't be executed, see:
9635 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9636 * To work around this, we extend such try blocks to include the last x bytes
9637 * of the Monitor.Enter () call.
9639 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9640 MonoBasicBlock *tbb;
9642 GET_BBLOCK (cfg, tbb, ip + 5);
9644 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9645 * from Monitor.Enter like ArgumentNullException.
9647 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9648 /* Mark this bblock as needing to be extended */
9649 tbb->extend_try_block = TRUE;
9653 /* Conversion to a JIT intrinsic */
9654 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9655 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9656 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9663 if ((cfg->opt & MONO_OPT_INLINE) &&
9664 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9665 mono_method_check_inlining (cfg, cmethod)) {
9667 gboolean always = FALSE;
9669 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9670 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9671 /* Prevent inlining of methods that call wrappers */
9672 INLINE_FAILURE ("wrapper call");
9673 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9677 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9679 cfg->real_offset += 5;
9681 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9682 /* *sp is already set by inline_method */
9687 inline_costs += costs;
9693 /* Tail recursion elimination */
9694 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9695 gboolean has_vtargs = FALSE;
9698 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9699 INLINE_FAILURE ("tail call");
9701 /* keep it simple */
9702 for (i = fsig->param_count - 1; i >= 0; i--) {
9703 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9708 for (i = 0; i < n; ++i)
9709 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9710 MONO_INST_NEW (cfg, ins, OP_BR);
9711 MONO_ADD_INS (cfg->cbb, ins);
9712 tblock = start_bblock->out_bb [0];
9713 link_bblock (cfg, cfg->cbb, tblock);
9714 ins->inst_target_bb = tblock;
9715 start_new_bblock = 1;
9717 /* skip the CEE_RET, too */
9718 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9725 inline_costs += 10 * num_calls++;
9728 * Making generic calls out of gsharedvt methods.
9729 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9730 * patching gshared method addresses into a gsharedvt method.
9732 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9733 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9734 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9735 MonoRgctxInfoType info_type;
9738 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9739 //GSHAREDVT_FAILURE (*ip);
9740 // disable for possible remoting calls
9741 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9742 GSHAREDVT_FAILURE (*ip);
9743 if (fsig->generic_param_count) {
9744 /* virtual generic call */
9745 g_assert (!imt_arg);
9746 /* Same as the virtual generic case above */
9747 imt_arg = emit_get_rgctx_method (cfg, context_used,
9748 cmethod, MONO_RGCTX_INFO_METHOD);
9749 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9751 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9752 /* This can happen when we call a fully instantiated iface method */
9753 imt_arg = emit_get_rgctx_method (cfg, context_used,
9754 cmethod, MONO_RGCTX_INFO_METHOD);
9759 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9760 keep_this_alive = sp [0];
9762 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9763 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9765 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9766 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9768 if (cfg->llvm_only) {
9769 // FIXME: Avoid initializing vtable_arg
9770 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9772 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9777 /* Generic sharing */
9780 * Use this if the callee is gsharedvt sharable too, since
9781 * at runtime we might find an instantiation so the call cannot
9782 * be patched (the 'no_patch' code path in mini-trampolines.c).
9784 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9785 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9786 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9787 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9788 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9789 INLINE_FAILURE ("gshared");
9791 g_assert (cfg->gshared && cmethod);
9795 * We are compiling a call to a
9796 * generic method from shared code,
9797 * which means that we have to look up
9798 * the method in the rgctx and do an
9802 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9804 if (cfg->llvm_only) {
9805 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9806 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9808 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9809 // FIXME: Avoid initializing imt_arg/vtable_arg
9810 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9812 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9813 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9818 /* Direct calls to icalls */
9820 MonoMethod *wrapper;
9823 /* Inline the wrapper */
9824 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9826 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9827 g_assert (costs > 0);
9828 cfg->real_offset += 5;
9830 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9831 /* *sp is already set by inline_method */
9836 inline_costs += costs;
9845 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9846 MonoInst *val = sp [fsig->param_count];
9848 if (val->type == STACK_OBJ) {
9849 MonoInst *iargs [2];
9854 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9857 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9858 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9859 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9860 emit_write_barrier (cfg, addr, val);
9861 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9862 GSHAREDVT_FAILURE (*ip);
9863 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9864 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9866 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9867 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9868 if (!cmethod->klass->element_class->valuetype && !readonly)
9869 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9870 CHECK_TYPELOAD (cmethod->klass);
9873 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9876 g_assert_not_reached ();
9883 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9887 /* Tail prefix / tail call optimization */
9889 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9890 /* FIXME: runtime generic context pointer for jumps? */
9891 /* FIXME: handle this for generic sharing eventually */
9892 if ((ins_flag & MONO_INST_TAILCALL) &&
9893 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9894 supported_tail_call = TRUE;
9896 if (supported_tail_call) {
9899 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9900 INLINE_FAILURE ("tail call");
9902 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9904 if (cfg->backend->have_op_tail_call) {
9905 /* Handle tail calls similarly to normal calls */
9908 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9910 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9911 call->tail_call = TRUE;
9912 call->method = cmethod;
9913 call->signature = mono_method_signature (cmethod);
9916 * We implement tail calls by storing the actual arguments into the
9917 * argument variables, then emitting a CEE_JMP.
9919 for (i = 0; i < n; ++i) {
9920 /* Prevent argument from being register allocated */
9921 arg_array [i]->flags |= MONO_INST_VOLATILE;
9922 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9924 ins = (MonoInst*)call;
9925 ins->inst_p0 = cmethod;
9926 ins->inst_p1 = arg_array [0];
9927 MONO_ADD_INS (cfg->cbb, ins);
9928 link_bblock (cfg, cfg->cbb, end_bblock);
9929 start_new_bblock = 1;
9931 // FIXME: Eliminate unreachable epilogs
9934 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9935 * only reachable from this call.
9937 GET_BBLOCK (cfg, tblock, ip + 5);
9938 if (tblock == cfg->cbb || tblock->in_count == 0)
9947 * Synchronized wrappers.
9948 * It's hard to determine where to replace a method with its synchronized
9949 * wrapper without causing an infinite recursion. The current solution is
9950 * to add the synchronized wrapper in the trampolines, and to
9951 * change the called method to a dummy wrapper, and resolve that wrapper
9952 * to the real method in mono_jit_compile_method ().
9954 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9955 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9956 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9957 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9961 * Virtual calls in llvm-only mode.
9963 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9964 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9969 INLINE_FAILURE ("call");
9970 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9971 imt_arg, vtable_arg);
9973 if (tail_call && !cfg->llvm_only) {
9974 link_bblock (cfg, cfg->cbb, end_bblock);
9975 start_new_bblock = 1;
9977 // FIXME: Eliminate unreachable epilogs
9980 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9981 * only reachable from this call.
9983 GET_BBLOCK (cfg, tblock, ip + 5);
9984 if (tblock == cfg->cbb || tblock->in_count == 0)
9991 /* End of call, INS should contain the result of the call, if any */
9993 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9996 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10001 if (keep_this_alive) {
10002 MonoInst *dummy_use;
10004 /* See mono_emit_method_call_full () */
10005 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10008 CHECK_CFG_EXCEPTION;
10012 g_assert (*ip == CEE_RET);
10016 constrained_class = NULL;
10017 if (need_seq_point)
10018 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10022 if (cfg->method != method) {
10023 /* return from inlined method */
10025 * If in_count == 0, that means the ret is unreachable due to
10026 * being preceded by a throw. In that case, inline_method () will
10027 * handle setting the return value
10028 * (test case: test_0_inline_throw ()).
10030 if (return_var && cfg->cbb->in_count) {
10031 MonoType *ret_type = mono_method_signature (method)->ret;
10037 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10040 //g_assert (returnvar != -1);
10041 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10042 cfg->ret_var_set = TRUE;
10045 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10047 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10048 emit_pop_lmf (cfg);
10051 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10053 if (seq_points && !sym_seq_points) {
10055 * Place a seq point here too even though the IL stack is not
10056 * empty, so a step over on
10059 * will work correctly.
10061 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10062 MONO_ADD_INS (cfg->cbb, ins);
10065 g_assert (!return_var);
10069 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10072 emit_setret (cfg, *sp);
10075 if (sp != stack_start)
10077 MONO_INST_NEW (cfg, ins, OP_BR);
10079 ins->inst_target_bb = end_bblock;
10080 MONO_ADD_INS (cfg->cbb, ins);
10081 link_bblock (cfg, cfg->cbb, end_bblock);
10082 start_new_bblock = 1;
10086 MONO_INST_NEW (cfg, ins, OP_BR);
10088 target = ip + 1 + (signed char)(*ip);
10090 GET_BBLOCK (cfg, tblock, target);
10091 link_bblock (cfg, cfg->cbb, tblock);
10092 ins->inst_target_bb = tblock;
10093 if (sp != stack_start) {
10094 handle_stack_args (cfg, stack_start, sp - stack_start);
10096 CHECK_UNVERIFIABLE (cfg);
10098 MONO_ADD_INS (cfg->cbb, ins);
10099 start_new_bblock = 1;
10100 inline_costs += BRANCH_COST;
10114 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10116 target = ip + 1 + *(signed char*)ip;
10119 ADD_BINCOND (NULL);
10122 inline_costs += BRANCH_COST;
10126 MONO_INST_NEW (cfg, ins, OP_BR);
10129 target = ip + 4 + (gint32)read32(ip);
10131 GET_BBLOCK (cfg, tblock, target);
10132 link_bblock (cfg, cfg->cbb, tblock);
10133 ins->inst_target_bb = tblock;
10134 if (sp != stack_start) {
10135 handle_stack_args (cfg, stack_start, sp - stack_start);
10137 CHECK_UNVERIFIABLE (cfg);
10140 MONO_ADD_INS (cfg->cbb, ins);
10142 start_new_bblock = 1;
10143 inline_costs += BRANCH_COST;
10145 case CEE_BRFALSE_S:
10150 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10151 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10152 guint32 opsize = is_short ? 1 : 4;
10154 CHECK_OPSIZE (opsize);
10156 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10159 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10164 GET_BBLOCK (cfg, tblock, target);
10165 link_bblock (cfg, cfg->cbb, tblock);
10166 GET_BBLOCK (cfg, tblock, ip);
10167 link_bblock (cfg, cfg->cbb, tblock);
10169 if (sp != stack_start) {
10170 handle_stack_args (cfg, stack_start, sp - stack_start);
10171 CHECK_UNVERIFIABLE (cfg);
10174 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10175 cmp->sreg1 = sp [0]->dreg;
10176 type_from_op (cfg, cmp, sp [0], NULL);
10179 #if SIZEOF_REGISTER == 4
10180 if (cmp->opcode == OP_LCOMPARE_IMM) {
10181 /* Convert it to OP_LCOMPARE */
10182 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10183 ins->type = STACK_I8;
10184 ins->dreg = alloc_dreg (cfg, STACK_I8);
10186 MONO_ADD_INS (cfg->cbb, ins);
10187 cmp->opcode = OP_LCOMPARE;
10188 cmp->sreg2 = ins->dreg;
10191 MONO_ADD_INS (cfg->cbb, cmp);
10193 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10194 type_from_op (cfg, ins, sp [0], NULL);
10195 MONO_ADD_INS (cfg->cbb, ins);
10196 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10197 GET_BBLOCK (cfg, tblock, target);
10198 ins->inst_true_bb = tblock;
10199 GET_BBLOCK (cfg, tblock, ip);
10200 ins->inst_false_bb = tblock;
10201 start_new_bblock = 2;
10204 inline_costs += BRANCH_COST;
10219 MONO_INST_NEW (cfg, ins, *ip);
10221 target = ip + 4 + (gint32)read32(ip);
10224 ADD_BINCOND (NULL);
10227 inline_costs += BRANCH_COST;
10231 MonoBasicBlock **targets;
10232 MonoBasicBlock *default_bblock;
10233 MonoJumpInfoBBTable *table;
10234 int offset_reg = alloc_preg (cfg);
10235 int target_reg = alloc_preg (cfg);
10236 int table_reg = alloc_preg (cfg);
10237 int sum_reg = alloc_preg (cfg);
10238 gboolean use_op_switch;
10242 n = read32 (ip + 1);
10245 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10249 CHECK_OPSIZE (n * sizeof (guint32));
10250 target = ip + n * sizeof (guint32);
10252 GET_BBLOCK (cfg, default_bblock, target);
10253 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10255 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10256 for (i = 0; i < n; ++i) {
10257 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10258 targets [i] = tblock;
10259 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10263 if (sp != stack_start) {
10265 * Link the current bb with the targets as well, so handle_stack_args
10266 * will set their in_stack correctly.
10268 link_bblock (cfg, cfg->cbb, default_bblock);
10269 for (i = 0; i < n; ++i)
10270 link_bblock (cfg, cfg->cbb, targets [i]);
10272 handle_stack_args (cfg, stack_start, sp - stack_start);
10274 CHECK_UNVERIFIABLE (cfg);
10276 /* Undo the links */
10277 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10278 for (i = 0; i < n; ++i)
10279 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10283 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10285 for (i = 0; i < n; ++i)
10286 link_bblock (cfg, cfg->cbb, targets [i]);
10288 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10289 table->table = targets;
10290 table->table_size = n;
10292 use_op_switch = FALSE;
10294 /* ARM implements SWITCH statements differently */
10295 /* FIXME: Make it use the generic implementation */
10296 if (!cfg->compile_aot)
10297 use_op_switch = TRUE;
10300 if (COMPILE_LLVM (cfg))
10301 use_op_switch = TRUE;
10303 cfg->cbb->has_jump_table = 1;
10305 if (use_op_switch) {
10306 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10307 ins->sreg1 = src1->dreg;
10308 ins->inst_p0 = table;
10309 ins->inst_many_bb = targets;
10310 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10311 MONO_ADD_INS (cfg->cbb, ins);
10313 if (sizeof (gpointer) == 8)
10314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10318 #if SIZEOF_REGISTER == 8
10319 /* The upper word might not be zero, and we add it to a 64 bit address later */
10320 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10323 if (cfg->compile_aot) {
10324 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10326 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10327 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10328 ins->inst_p0 = table;
10329 ins->dreg = table_reg;
10330 MONO_ADD_INS (cfg->cbb, ins);
10333 /* FIXME: Use load_memindex */
10334 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10336 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10338 start_new_bblock = 1;
10339 inline_costs += (BRANCH_COST * 2);
10352 case CEE_LDIND_REF:
10359 dreg = alloc_freg (cfg);
10362 dreg = alloc_lreg (cfg);
10364 case CEE_LDIND_REF:
10365 dreg = alloc_ireg_ref (cfg);
10368 dreg = alloc_preg (cfg);
10371 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10372 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10373 if (*ip == CEE_LDIND_R4)
10374 ins->type = cfg->r4_stack_type;
10375 ins->flags |= ins_flag;
10376 MONO_ADD_INS (cfg->cbb, ins);
10378 if (ins_flag & MONO_INST_VOLATILE) {
10379 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10380 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10385 case CEE_STIND_REF:
10396 if (ins_flag & MONO_INST_VOLATILE) {
10397 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10398 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10401 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10402 ins->flags |= ins_flag;
10405 MONO_ADD_INS (cfg->cbb, ins);
10407 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10408 emit_write_barrier (cfg, sp [0], sp [1]);
10417 MONO_INST_NEW (cfg, ins, (*ip));
10419 ins->sreg1 = sp [0]->dreg;
10420 ins->sreg2 = sp [1]->dreg;
10421 type_from_op (cfg, ins, sp [0], sp [1]);
10423 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10425 /* Use the immediate opcodes if possible */
10426 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10427 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10428 if (imm_opcode != -1) {
10429 ins->opcode = imm_opcode;
10430 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10433 NULLIFY_INS (sp [1]);
10437 MONO_ADD_INS ((cfg)->cbb, (ins));
10439 *sp++ = mono_decompose_opcode (cfg, ins);
10456 MONO_INST_NEW (cfg, ins, (*ip));
10458 ins->sreg1 = sp [0]->dreg;
10459 ins->sreg2 = sp [1]->dreg;
10460 type_from_op (cfg, ins, sp [0], sp [1]);
10462 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10463 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10465 /* FIXME: Pass opcode to is_inst_imm */
10467 /* Use the immediate opcodes if possible */
10468 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10469 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10470 if (imm_opcode != -1) {
10471 ins->opcode = imm_opcode;
10472 if (sp [1]->opcode == OP_I8CONST) {
10473 #if SIZEOF_REGISTER == 8
10474 ins->inst_imm = sp [1]->inst_l;
10476 ins->inst_ls_word = sp [1]->inst_ls_word;
10477 ins->inst_ms_word = sp [1]->inst_ms_word;
10481 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10484 /* Might be followed by an instruction added by add_widen_op */
10485 if (sp [1]->next == NULL)
10486 NULLIFY_INS (sp [1]);
10489 MONO_ADD_INS ((cfg)->cbb, (ins));
10491 *sp++ = mono_decompose_opcode (cfg, ins);
10504 case CEE_CONV_OVF_I8:
10505 case CEE_CONV_OVF_U8:
10506 case CEE_CONV_R_UN:
10509 /* Special case this earlier so we have long constants in the IR */
10510 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10511 int data = sp [-1]->inst_c0;
10512 sp [-1]->opcode = OP_I8CONST;
10513 sp [-1]->type = STACK_I8;
10514 #if SIZEOF_REGISTER == 8
10515 if ((*ip) == CEE_CONV_U8)
10516 sp [-1]->inst_c0 = (guint32)data;
10518 sp [-1]->inst_c0 = data;
10520 sp [-1]->inst_ls_word = data;
10521 if ((*ip) == CEE_CONV_U8)
10522 sp [-1]->inst_ms_word = 0;
10524 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10526 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10533 case CEE_CONV_OVF_I4:
10534 case CEE_CONV_OVF_I1:
10535 case CEE_CONV_OVF_I2:
10536 case CEE_CONV_OVF_I:
10537 case CEE_CONV_OVF_U:
10540 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10541 ADD_UNOP (CEE_CONV_OVF_I8);
10548 case CEE_CONV_OVF_U1:
10549 case CEE_CONV_OVF_U2:
10550 case CEE_CONV_OVF_U4:
10553 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10554 ADD_UNOP (CEE_CONV_OVF_U8);
10561 case CEE_CONV_OVF_I1_UN:
10562 case CEE_CONV_OVF_I2_UN:
10563 case CEE_CONV_OVF_I4_UN:
10564 case CEE_CONV_OVF_I8_UN:
10565 case CEE_CONV_OVF_U1_UN:
10566 case CEE_CONV_OVF_U2_UN:
10567 case CEE_CONV_OVF_U4_UN:
10568 case CEE_CONV_OVF_U8_UN:
10569 case CEE_CONV_OVF_I_UN:
10570 case CEE_CONV_OVF_U_UN:
10577 CHECK_CFG_EXCEPTION;
10581 case CEE_ADD_OVF_UN:
10583 case CEE_MUL_OVF_UN:
10585 case CEE_SUB_OVF_UN:
10591 GSHAREDVT_FAILURE (*ip);
10594 token = read32 (ip + 1);
10595 klass = mini_get_class (method, token, generic_context);
10596 CHECK_TYPELOAD (klass);
10598 if (generic_class_is_reference_type (cfg, klass)) {
10599 MonoInst *store, *load;
10600 int dreg = alloc_ireg_ref (cfg);
10602 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10603 load->flags |= ins_flag;
10604 MONO_ADD_INS (cfg->cbb, load);
10606 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10607 store->flags |= ins_flag;
10608 MONO_ADD_INS (cfg->cbb, store);
10610 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10611 emit_write_barrier (cfg, sp [0], sp [1]);
10613 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10619 int loc_index = -1;
10625 token = read32 (ip + 1);
10626 klass = mini_get_class (method, token, generic_context);
10627 CHECK_TYPELOAD (klass);
10629 /* Optimize the common ldobj+stloc combination */
10632 loc_index = ip [6];
10639 loc_index = ip [5] - CEE_STLOC_0;
10646 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10647 CHECK_LOCAL (loc_index);
10649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10650 ins->dreg = cfg->locals [loc_index]->dreg;
10651 ins->flags |= ins_flag;
10654 if (ins_flag & MONO_INST_VOLATILE) {
10655 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10656 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10662 /* Optimize the ldobj+stobj combination */
10663 /* The reference case ends up being a load+store anyway */
10664 /* Skip this if the operation is volatile. */
10665 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10670 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10677 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10678 ins->flags |= ins_flag;
10681 if (ins_flag & MONO_INST_VOLATILE) {
10682 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10683 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10692 CHECK_STACK_OVF (1);
10694 n = read32 (ip + 1);
10696 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10697 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10698 ins->type = STACK_OBJ;
10701 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10702 MonoInst *iargs [1];
10703 char *str = (char *)mono_method_get_wrapper_data (method, n);
10705 if (cfg->compile_aot)
10706 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10708 EMIT_NEW_PCONST (cfg, iargs [0], str);
10709 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10711 if (cfg->opt & MONO_OPT_SHARED) {
10712 MonoInst *iargs [3];
10714 if (cfg->compile_aot) {
10715 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10717 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10718 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10719 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10720 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10721 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10723 if (cfg->cbb->out_of_line) {
10724 MonoInst *iargs [2];
10726 if (image == mono_defaults.corlib) {
10728 * Avoid relocations in AOT and save some space by using a
10729 * version of helper_ldstr specialized to mscorlib.
10731 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10732 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10734 /* Avoid creating the string object */
10735 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10736 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10737 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10741 if (cfg->compile_aot) {
10742 NEW_LDSTRCONST (cfg, ins, image, n);
10744 MONO_ADD_INS (cfg->cbb, ins);
10747 NEW_PCONST (cfg, ins, NULL);
10748 ins->type = STACK_OBJ;
10749 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10751 OUT_OF_MEMORY_FAILURE;
10754 MONO_ADD_INS (cfg->cbb, ins);
10763 MonoInst *iargs [2];
10764 MonoMethodSignature *fsig;
10767 MonoInst *vtable_arg = NULL;
10770 token = read32 (ip + 1);
10771 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10774 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10777 mono_save_token_info (cfg, image, token, cmethod);
10779 if (!mono_class_init (cmethod->klass))
10780 TYPE_LOAD_ERROR (cmethod->klass);
10782 context_used = mini_method_check_context_used (cfg, cmethod);
10784 if (mono_security_core_clr_enabled ())
10785 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10787 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10788 emit_class_init (cfg, cmethod->klass);
10789 CHECK_TYPELOAD (cmethod->klass);
10793 if (cfg->gsharedvt) {
10794 if (mini_is_gsharedvt_variable_signature (sig))
10795 GSHAREDVT_FAILURE (*ip);
10799 n = fsig->param_count;
10803 * Generate smaller code for the common newobj <exception> instruction in
10804 * argument checking code.
10806 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10807 is_exception_class (cmethod->klass) && n <= 2 &&
10808 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10809 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10810 MonoInst *iargs [3];
10814 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10817 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10820 iargs [1] = sp [0];
10821 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10824 iargs [1] = sp [0];
10825 iargs [2] = sp [1];
10826 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10829 g_assert_not_reached ();
10837 /* move the args to allow room for 'this' in the first position */
10843 /* check_call_signature () requires sp[0] to be set */
10844 this_ins.type = STACK_OBJ;
10845 sp [0] = &this_ins;
10846 if (check_call_signature (cfg, fsig, sp))
10851 if (mini_class_is_system_array (cmethod->klass)) {
10852 *sp = emit_get_rgctx_method (cfg, context_used,
10853 cmethod, MONO_RGCTX_INFO_METHOD);
10855 /* Avoid varargs in the common case */
10856 if (fsig->param_count == 1)
10857 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10858 else if (fsig->param_count == 2)
10859 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10860 else if (fsig->param_count == 3)
10861 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10862 else if (fsig->param_count == 4)
10863 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10865 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10866 } else if (cmethod->string_ctor) {
10867 g_assert (!context_used);
10868 g_assert (!vtable_arg);
10869 /* we simply pass a null pointer */
10870 EMIT_NEW_PCONST (cfg, *sp, NULL);
10871 /* now call the string ctor */
10872 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10874 if (cmethod->klass->valuetype) {
10875 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10876 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10877 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10882 * The code generated by mini_emit_virtual_call () expects
10883 * iargs [0] to be a boxed instance, but luckily the vcall
10884 * will be transformed into a normal call there.
10886 } else if (context_used) {
10887 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10890 MonoVTable *vtable = NULL;
10892 if (!cfg->compile_aot)
10893 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10894 CHECK_TYPELOAD (cmethod->klass);
10897 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10898 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10899 * As a workaround, we call class cctors before allocating objects.
10901 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10902 emit_class_init (cfg, cmethod->klass);
10903 if (cfg->verbose_level > 2)
10904 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10905 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10908 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10911 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10914 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10916 /* Now call the actual ctor */
10917 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10918 CHECK_CFG_EXCEPTION;
10921 if (alloc == NULL) {
10923 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10924 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10932 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10933 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10936 case CEE_CASTCLASS:
10940 token = read32 (ip + 1);
10941 klass = mini_get_class (method, token, generic_context);
10942 CHECK_TYPELOAD (klass);
10943 if (sp [0]->type != STACK_OBJ)
10946 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10947 CHECK_CFG_EXCEPTION;
10956 token = read32 (ip + 1);
10957 klass = mini_get_class (method, token, generic_context);
10958 CHECK_TYPELOAD (klass);
10959 if (sp [0]->type != STACK_OBJ)
10962 context_used = mini_class_check_context_used (cfg, klass);
10964 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10965 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10966 MonoInst *args [3];
10973 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10976 idx = get_castclass_cache_idx (cfg);
10977 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10979 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10982 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10983 MonoMethod *mono_isinst;
10984 MonoInst *iargs [1];
10987 mono_isinst = mono_marshal_get_isinst (klass);
10988 iargs [0] = sp [0];
10990 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10991 iargs, ip, cfg->real_offset, TRUE);
10992 CHECK_CFG_EXCEPTION;
10993 g_assert (costs > 0);
10996 cfg->real_offset += 5;
11000 inline_costs += costs;
11003 ins = handle_isinst (cfg, klass, *sp, context_used);
11004 CHECK_CFG_EXCEPTION;
11010 case CEE_UNBOX_ANY: {
11011 MonoInst *res, *addr;
11016 token = read32 (ip + 1);
11017 klass = mini_get_class (method, token, generic_context);
11018 CHECK_TYPELOAD (klass);
11020 mono_save_token_info (cfg, image, token, klass);
11022 context_used = mini_class_check_context_used (cfg, klass);
11024 if (mini_is_gsharedvt_klass (klass)) {
11025 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11027 } else if (generic_class_is_reference_type (cfg, klass)) {
11028 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11029 CHECK_CFG_EXCEPTION;
11030 } else if (mono_class_is_nullable (klass)) {
11031 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11033 addr = handle_unbox (cfg, klass, sp, context_used);
11035 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11046 MonoClass *enum_class;
11047 MonoMethod *has_flag;
11053 token = read32 (ip + 1);
11054 klass = mini_get_class (method, token, generic_context);
11055 CHECK_TYPELOAD (klass);
11057 mono_save_token_info (cfg, image, token, klass);
11059 context_used = mini_class_check_context_used (cfg, klass);
11061 if (generic_class_is_reference_type (cfg, klass)) {
11067 if (klass == mono_defaults.void_class)
11069 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11071 /* frequent check in generic code: box (struct), brtrue */
11076 * <push int/long ptr>
11079 * constrained. MyFlags
11080 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11082 * If we find this sequence and the operand types on box and constrained
11083 * are equal, we can emit a specialized instruction sequence instead of
11084 * the very slow HasFlag () call.
11086 if ((cfg->opt & MONO_OPT_INTRINS) &&
11087 /* Cheap checks first. */
11088 ip + 5 + 6 + 5 < end &&
11089 ip [5] == CEE_PREFIX1 &&
11090 ip [6] == CEE_CONSTRAINED_ &&
11091 ip [11] == CEE_CALLVIRT &&
11092 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11093 mono_class_is_enum (klass) &&
11094 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11095 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11096 has_flag->klass == mono_defaults.enum_class &&
11097 !strcmp (has_flag->name, "HasFlag") &&
11098 has_flag->signature->hasthis &&
11099 has_flag->signature->param_count == 1) {
11100 CHECK_TYPELOAD (enum_class);
11102 if (enum_class == klass) {
11103 MonoInst *enum_this, *enum_flag;
11108 enum_this = sp [0];
11109 enum_flag = sp [1];
11111 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11116 // FIXME: LLVM can't handle the inconsistent bb linking
11117 if (!mono_class_is_nullable (klass) &&
11118 !mini_is_gsharedvt_klass (klass) &&
11119 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11120 (ip [5] == CEE_BRTRUE ||
11121 ip [5] == CEE_BRTRUE_S ||
11122 ip [5] == CEE_BRFALSE ||
11123 ip [5] == CEE_BRFALSE_S)) {
11124 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11126 MonoBasicBlock *true_bb, *false_bb;
11130 if (cfg->verbose_level > 3) {
11131 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11132 printf ("<box+brtrue opt>\n");
11137 case CEE_BRFALSE_S:
11140 target = ip + 1 + (signed char)(*ip);
11147 target = ip + 4 + (gint)(read32 (ip));
11151 g_assert_not_reached ();
11155 * We need to link both bblocks, since it is needed for handling stack
11156 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11157 * Branching to only one of them would lead to inconsistencies, so
11158 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11160 GET_BBLOCK (cfg, true_bb, target);
11161 GET_BBLOCK (cfg, false_bb, ip);
11163 mono_link_bblock (cfg, cfg->cbb, true_bb);
11164 mono_link_bblock (cfg, cfg->cbb, false_bb);
11166 if (sp != stack_start) {
11167 handle_stack_args (cfg, stack_start, sp - stack_start);
11169 CHECK_UNVERIFIABLE (cfg);
11172 if (COMPILE_LLVM (cfg)) {
11173 dreg = alloc_ireg (cfg);
11174 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11177 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11179 /* The JIT can't eliminate the iconst+compare */
11180 MONO_INST_NEW (cfg, ins, OP_BR);
11181 ins->inst_target_bb = is_true ? true_bb : false_bb;
11182 MONO_ADD_INS (cfg->cbb, ins);
11185 start_new_bblock = 1;
11189 *sp++ = handle_box (cfg, val, klass, context_used);
11191 CHECK_CFG_EXCEPTION;
11200 token = read32 (ip + 1);
11201 klass = mini_get_class (method, token, generic_context);
11202 CHECK_TYPELOAD (klass);
11204 mono_save_token_info (cfg, image, token, klass);
11206 context_used = mini_class_check_context_used (cfg, klass);
11208 if (mono_class_is_nullable (klass)) {
11211 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11212 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11216 ins = handle_unbox (cfg, klass, sp, context_used);
11229 MonoClassField *field;
11230 #ifndef DISABLE_REMOTING
11234 gboolean is_instance;
11236 gpointer addr = NULL;
11237 gboolean is_special_static;
11239 MonoInst *store_val = NULL;
11240 MonoInst *thread_ins;
11243 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11245 if (op == CEE_STFLD) {
11248 store_val = sp [1];
11253 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11255 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11258 if (op == CEE_STSFLD) {
11261 store_val = sp [0];
11266 token = read32 (ip + 1);
11267 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11268 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11269 klass = field->parent;
11272 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11275 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11276 FIELD_ACCESS_FAILURE (method, field);
11277 mono_class_init (klass);
11279 /* if the class is Critical then transparent code cannot access its fields */
11280 if (!is_instance && mono_security_core_clr_enabled ())
11281 ensure_method_is_allowed_to_access_field (cfg, method, field);
11283 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11284 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11285 if (mono_security_core_clr_enabled ())
11286 ensure_method_is_allowed_to_access_field (cfg, method, field);
11289 ftype = mono_field_get_type (field);
11292 * LDFLD etc. is usable on static fields as well, so convert those cases to
11295 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11307 g_assert_not_reached ();
11309 is_instance = FALSE;
11312 context_used = mini_class_check_context_used (cfg, klass);
11314 /* INSTANCE CASE */
11316 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11317 if (op == CEE_STFLD) {
11318 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11320 #ifndef DISABLE_REMOTING
11321 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11322 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11323 MonoInst *iargs [5];
11325 GSHAREDVT_FAILURE (op);
11327 iargs [0] = sp [0];
11328 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11329 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11330 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11332 iargs [4] = sp [1];
11334 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11335 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11336 iargs, ip, cfg->real_offset, TRUE);
11337 CHECK_CFG_EXCEPTION;
11338 g_assert (costs > 0);
11340 cfg->real_offset += 5;
11342 inline_costs += costs;
11344 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11351 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11353 if (mini_is_gsharedvt_klass (klass)) {
11354 MonoInst *offset_ins;
11356 context_used = mini_class_check_context_used (cfg, klass);
11358 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11359 /* The value is offset by 1 */
11360 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11361 dreg = alloc_ireg_mp (cfg);
11362 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11363 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11364 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11366 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11368 if (sp [0]->opcode != OP_LDADDR)
11369 store->flags |= MONO_INST_FAULT;
11371 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11372 /* insert call to write barrier */
11376 dreg = alloc_ireg_mp (cfg);
11377 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11378 emit_write_barrier (cfg, ptr, sp [1]);
11381 store->flags |= ins_flag;
11388 #ifndef DISABLE_REMOTING
11389 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11390 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11391 MonoInst *iargs [4];
11393 GSHAREDVT_FAILURE (op);
11395 iargs [0] = sp [0];
11396 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11397 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11398 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11399 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11400 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11401 iargs, ip, cfg->real_offset, TRUE);
11402 CHECK_CFG_EXCEPTION;
11403 g_assert (costs > 0);
11405 cfg->real_offset += 5;
11409 inline_costs += costs;
11411 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11417 if (sp [0]->type == STACK_VTYPE) {
11420 /* Have to compute the address of the variable */
11422 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11424 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11426 g_assert (var->klass == klass);
11428 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11432 if (op == CEE_LDFLDA) {
11433 if (sp [0]->type == STACK_OBJ) {
11434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11435 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11438 dreg = alloc_ireg_mp (cfg);
11440 if (mini_is_gsharedvt_klass (klass)) {
11441 MonoInst *offset_ins;
11443 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11444 /* The value is offset by 1 */
11445 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11446 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11448 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11450 ins->klass = mono_class_from_mono_type (field->type);
11451 ins->type = STACK_MP;
11456 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11458 if (mini_is_gsharedvt_klass (klass)) {
11459 MonoInst *offset_ins;
11461 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11462 /* The value is offset by 1 */
11463 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11464 dreg = alloc_ireg_mp (cfg);
11465 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11466 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11468 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11470 load->flags |= ins_flag;
11471 if (sp [0]->opcode != OP_LDADDR)
11472 load->flags |= MONO_INST_FAULT;
11484 context_used = mini_class_check_context_used (cfg, klass);
11486 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11489 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11490 * to be called here.
11492 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11493 mono_class_vtable (cfg->domain, klass);
11494 CHECK_TYPELOAD (klass);
11496 mono_domain_lock (cfg->domain);
11497 if (cfg->domain->special_static_fields)
11498 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11499 mono_domain_unlock (cfg->domain);
11501 is_special_static = mono_class_field_is_special_static (field);
11503 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11504 thread_ins = mono_get_thread_intrinsic (cfg);
11508 /* Generate IR to compute the field address */
11509 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11511 * Fast access to TLS data
11512 * Inline version of get_thread_static_data () in
11516 int idx, static_data_reg, array_reg, dreg;
11518 GSHAREDVT_FAILURE (op);
11520 MONO_ADD_INS (cfg->cbb, thread_ins);
11521 static_data_reg = alloc_ireg (cfg);
11522 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11524 if (cfg->compile_aot) {
11525 int offset_reg, offset2_reg, idx_reg;
11527 /* For TLS variables, this will return the TLS offset */
11528 EMIT_NEW_SFLDACONST (cfg, ins, field);
11529 offset_reg = ins->dreg;
11530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11531 idx_reg = alloc_ireg (cfg);
11532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11534 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11535 array_reg = alloc_ireg (cfg);
11536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11537 offset2_reg = alloc_ireg (cfg);
11538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11540 dreg = alloc_ireg (cfg);
11541 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11543 offset = (gsize)addr & 0x7fffffff;
11544 idx = offset & 0x3f;
11546 array_reg = alloc_ireg (cfg);
11547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11548 dreg = alloc_ireg (cfg);
11549 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11551 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11552 (cfg->compile_aot && is_special_static) ||
11553 (context_used && is_special_static)) {
11554 MonoInst *iargs [2];
11556 g_assert (field->parent);
11557 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11558 if (context_used) {
11559 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11560 field, MONO_RGCTX_INFO_CLASS_FIELD);
11562 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11564 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11565 } else if (context_used) {
11566 MonoInst *static_data;
11569 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11570 method->klass->name_space, method->klass->name, method->name,
11571 depth, field->offset);
11574 if (mono_class_needs_cctor_run (klass, method))
11575 emit_class_init (cfg, klass);
11578 * The pointer we're computing here is
11580 * super_info.static_data + field->offset
11582 static_data = emit_get_rgctx_klass (cfg, context_used,
11583 klass, MONO_RGCTX_INFO_STATIC_DATA);
11585 if (mini_is_gsharedvt_klass (klass)) {
11586 MonoInst *offset_ins;
11588 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11589 /* The value is offset by 1 */
11590 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11591 dreg = alloc_ireg_mp (cfg);
11592 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11593 } else if (field->offset == 0) {
11596 int addr_reg = mono_alloc_preg (cfg);
11597 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11599 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11600 MonoInst *iargs [2];
11602 g_assert (field->parent);
11603 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11604 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11605 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11607 MonoVTable *vtable = NULL;
11609 if (!cfg->compile_aot)
11610 vtable = mono_class_vtable (cfg->domain, klass);
11611 CHECK_TYPELOAD (klass);
11614 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11615 if (!(g_slist_find (class_inits, klass))) {
11616 emit_class_init (cfg, klass);
11617 if (cfg->verbose_level > 2)
11618 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11619 class_inits = g_slist_prepend (class_inits, klass);
11622 if (cfg->run_cctors) {
11624 /* This makes so that inline cannot trigger */
11625 /* .cctors: too many apps depend on them */
11626 /* running with a specific order... */
11628 if (! vtable->initialized)
11629 INLINE_FAILURE ("class init");
11630 ex = mono_runtime_class_init_full (vtable, FALSE);
11632 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11633 mono_error_set_exception_instance (&cfg->error, ex);
11634 g_assert_not_reached ();
11635 goto exception_exit;
11639 if (cfg->compile_aot)
11640 EMIT_NEW_SFLDACONST (cfg, ins, field);
11643 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11645 EMIT_NEW_PCONST (cfg, ins, addr);
11648 MonoInst *iargs [1];
11649 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11650 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11654 /* Generate IR to do the actual load/store operation */
11656 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11657 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11658 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11661 if (op == CEE_LDSFLDA) {
11662 ins->klass = mono_class_from_mono_type (ftype);
11663 ins->type = STACK_PTR;
11665 } else if (op == CEE_STSFLD) {
11668 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11669 store->flags |= ins_flag;
11671 gboolean is_const = FALSE;
11672 MonoVTable *vtable = NULL;
11673 gpointer addr = NULL;
11675 if (!context_used) {
11676 vtable = mono_class_vtable (cfg->domain, klass);
11677 CHECK_TYPELOAD (klass);
11679 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11680 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11681 int ro_type = ftype->type;
11683 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11684 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11685 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11688 GSHAREDVT_FAILURE (op);
11690 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11693 case MONO_TYPE_BOOLEAN:
11695 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11699 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11702 case MONO_TYPE_CHAR:
11704 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11708 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11713 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11717 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11722 case MONO_TYPE_PTR:
11723 case MONO_TYPE_FNPTR:
11724 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11725 type_to_eval_stack_type ((cfg), field->type, *sp);
11728 case MONO_TYPE_STRING:
11729 case MONO_TYPE_OBJECT:
11730 case MONO_TYPE_CLASS:
11731 case MONO_TYPE_SZARRAY:
11732 case MONO_TYPE_ARRAY:
11733 if (!mono_gc_is_moving ()) {
11734 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11735 type_to_eval_stack_type ((cfg), field->type, *sp);
11743 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11748 case MONO_TYPE_VALUETYPE:
11758 CHECK_STACK_OVF (1);
11760 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11761 load->flags |= ins_flag;
11767 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11768 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11769 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11780 token = read32 (ip + 1);
11781 klass = mini_get_class (method, token, generic_context);
11782 CHECK_TYPELOAD (klass);
11783 if (ins_flag & MONO_INST_VOLATILE) {
11784 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11785 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11787 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11788 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11789 ins->flags |= ins_flag;
11790 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11791 generic_class_is_reference_type (cfg, klass)) {
11792 /* insert call to write barrier */
11793 emit_write_barrier (cfg, sp [0], sp [1]);
11805 const char *data_ptr;
11807 guint32 field_token;
11813 token = read32 (ip + 1);
11815 klass = mini_get_class (method, token, generic_context);
11816 CHECK_TYPELOAD (klass);
11818 context_used = mini_class_check_context_used (cfg, klass);
11820 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11821 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11822 ins->sreg1 = sp [0]->dreg;
11823 ins->type = STACK_I4;
11824 ins->dreg = alloc_ireg (cfg);
11825 MONO_ADD_INS (cfg->cbb, ins);
11826 *sp = mono_decompose_opcode (cfg, ins);
11829 if (context_used) {
11830 MonoInst *args [3];
11831 MonoClass *array_class = mono_array_class_get (klass, 1);
11832 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11834 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11837 args [0] = emit_get_rgctx_klass (cfg, context_used,
11838 array_class, MONO_RGCTX_INFO_VTABLE);
11843 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11845 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11847 if (cfg->opt & MONO_OPT_SHARED) {
11848 /* Decompose now to avoid problems with references to the domainvar */
11849 MonoInst *iargs [3];
11851 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11852 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11853 iargs [2] = sp [0];
11855 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11857 /* Decompose later since it is needed by abcrem */
11858 MonoClass *array_type = mono_array_class_get (klass, 1);
11859 mono_class_vtable (cfg->domain, array_type);
11860 CHECK_TYPELOAD (array_type);
11862 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11863 ins->dreg = alloc_ireg_ref (cfg);
11864 ins->sreg1 = sp [0]->dreg;
11865 ins->inst_newa_class = klass;
11866 ins->type = STACK_OBJ;
11867 ins->klass = array_type;
11868 MONO_ADD_INS (cfg->cbb, ins);
11869 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11870 cfg->cbb->has_array_access = TRUE;
11872 /* Needed so mono_emit_load_get_addr () gets called */
11873 mono_get_got_var (cfg);
11883 * we inline/optimize the initialization sequence if possible.
11884 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11885 * for small sizes open code the memcpy
11886 * ensure the rva field is big enough
11888 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11889 MonoMethod *memcpy_method = get_memcpy_method ();
11890 MonoInst *iargs [3];
11891 int add_reg = alloc_ireg_mp (cfg);
11893 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11894 if (cfg->compile_aot) {
11895 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11897 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11899 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11900 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11909 if (sp [0]->type != STACK_OBJ)
11912 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11913 ins->dreg = alloc_preg (cfg);
11914 ins->sreg1 = sp [0]->dreg;
11915 ins->type = STACK_I4;
11916 /* This flag will be inherited by the decomposition */
11917 ins->flags |= MONO_INST_FAULT;
11918 MONO_ADD_INS (cfg->cbb, ins);
11919 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11920 cfg->cbb->has_array_access = TRUE;
11928 if (sp [0]->type != STACK_OBJ)
11931 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11933 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11934 CHECK_TYPELOAD (klass);
11935 /* we need to make sure that this array is exactly the type it needs
11936 * to be for correctness. the wrappers are lax with their usage
11937 * so we need to ignore them here
11939 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11940 MonoClass *array_class = mono_array_class_get (klass, 1);
11941 mini_emit_check_array_type (cfg, sp [0], array_class);
11942 CHECK_TYPELOAD (array_class);
11946 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11951 case CEE_LDELEM_I1:
11952 case CEE_LDELEM_U1:
11953 case CEE_LDELEM_I2:
11954 case CEE_LDELEM_U2:
11955 case CEE_LDELEM_I4:
11956 case CEE_LDELEM_U4:
11957 case CEE_LDELEM_I8:
11959 case CEE_LDELEM_R4:
11960 case CEE_LDELEM_R8:
11961 case CEE_LDELEM_REF: {
11967 if (*ip == CEE_LDELEM) {
11969 token = read32 (ip + 1);
11970 klass = mini_get_class (method, token, generic_context);
11971 CHECK_TYPELOAD (klass);
11972 mono_class_init (klass);
11975 klass = array_access_to_klass (*ip);
11977 if (sp [0]->type != STACK_OBJ)
11980 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11982 if (mini_is_gsharedvt_variable_klass (klass)) {
11983 // FIXME-VT: OP_ICONST optimization
11984 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11985 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11986 ins->opcode = OP_LOADV_MEMBASE;
11987 } else if (sp [1]->opcode == OP_ICONST) {
11988 int array_reg = sp [0]->dreg;
11989 int index_reg = sp [1]->dreg;
11990 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11992 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11993 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11995 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11996 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11998 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11999 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12002 if (*ip == CEE_LDELEM)
12009 case CEE_STELEM_I1:
12010 case CEE_STELEM_I2:
12011 case CEE_STELEM_I4:
12012 case CEE_STELEM_I8:
12013 case CEE_STELEM_R4:
12014 case CEE_STELEM_R8:
12015 case CEE_STELEM_REF:
12020 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12022 if (*ip == CEE_STELEM) {
12024 token = read32 (ip + 1);
12025 klass = mini_get_class (method, token, generic_context);
12026 CHECK_TYPELOAD (klass);
12027 mono_class_init (klass);
12030 klass = array_access_to_klass (*ip);
12032 if (sp [0]->type != STACK_OBJ)
12035 emit_array_store (cfg, klass, sp, TRUE);
12037 if (*ip == CEE_STELEM)
12044 case CEE_CKFINITE: {
12048 if (cfg->llvm_only) {
12049 MonoInst *iargs [1];
12051 iargs [0] = sp [0];
12052 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12054 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12055 ins->sreg1 = sp [0]->dreg;
12056 ins->dreg = alloc_freg (cfg);
12057 ins->type = STACK_R8;
12058 MONO_ADD_INS (cfg->cbb, ins);
12060 *sp++ = mono_decompose_opcode (cfg, ins);
12066 case CEE_REFANYVAL: {
12067 MonoInst *src_var, *src;
12069 int klass_reg = alloc_preg (cfg);
12070 int dreg = alloc_preg (cfg);
12072 GSHAREDVT_FAILURE (*ip);
12075 MONO_INST_NEW (cfg, ins, *ip);
12078 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12079 CHECK_TYPELOAD (klass);
12081 context_used = mini_class_check_context_used (cfg, klass);
12084 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12086 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12087 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12090 if (context_used) {
12091 MonoInst *klass_ins;
12093 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12094 klass, MONO_RGCTX_INFO_KLASS);
12097 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12098 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12100 mini_emit_class_check (cfg, klass_reg, klass);
12102 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12103 ins->type = STACK_MP;
12104 ins->klass = klass;
12109 case CEE_MKREFANY: {
12110 MonoInst *loc, *addr;
12112 GSHAREDVT_FAILURE (*ip);
12115 MONO_INST_NEW (cfg, ins, *ip);
12118 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12119 CHECK_TYPELOAD (klass);
12121 context_used = mini_class_check_context_used (cfg, klass);
12123 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12124 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12126 if (context_used) {
12127 MonoInst *const_ins;
12128 int type_reg = alloc_preg (cfg);
12130 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12131 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12132 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12133 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12134 } else if (cfg->compile_aot) {
12135 int const_reg = alloc_preg (cfg);
12136 int type_reg = alloc_preg (cfg);
12138 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12141 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12143 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12144 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12146 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12148 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12149 ins->type = STACK_VTYPE;
12150 ins->klass = mono_defaults.typed_reference_class;
12155 case CEE_LDTOKEN: {
12157 MonoClass *handle_class;
12159 CHECK_STACK_OVF (1);
12162 n = read32 (ip + 1);
12164 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12165 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12166 handle = mono_method_get_wrapper_data (method, n);
12167 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12168 if (handle_class == mono_defaults.typehandle_class)
12169 handle = &((MonoClass*)handle)->byval_arg;
12172 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12177 mono_class_init (handle_class);
12178 if (cfg->gshared) {
12179 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12180 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12181 /* This case handles ldtoken
12182 of an open type, like for
12185 } else if (handle_class == mono_defaults.typehandle_class) {
12186 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12187 } else if (handle_class == mono_defaults.fieldhandle_class)
12188 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12189 else if (handle_class == mono_defaults.methodhandle_class)
12190 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12192 g_assert_not_reached ();
12195 if ((cfg->opt & MONO_OPT_SHARED) &&
12196 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12197 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12198 MonoInst *addr, *vtvar, *iargs [3];
12199 int method_context_used;
12201 method_context_used = mini_method_check_context_used (cfg, method);
12203 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12205 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12206 EMIT_NEW_ICONST (cfg, iargs [1], n);
12207 if (method_context_used) {
12208 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12209 method, MONO_RGCTX_INFO_METHOD);
12210 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12212 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12213 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12215 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12217 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12219 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12221 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12222 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12223 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12224 (cmethod->klass == mono_defaults.systemtype_class) &&
12225 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12226 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12228 mono_class_init (tclass);
12229 if (context_used) {
12230 ins = emit_get_rgctx_klass (cfg, context_used,
12231 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12232 } else if (cfg->compile_aot) {
12233 if (method->wrapper_type) {
12234 mono_error_init (&error); //got to do it since there are multiple conditionals below
12235 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12236 /* Special case for static synchronized wrappers */
12237 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12239 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12240 /* FIXME: n is not a normal token */
12242 EMIT_NEW_PCONST (cfg, ins, NULL);
12245 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12249 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &error);
12250 mono_error_raise_exception (&error); /* FIXME don't raise here */
12252 EMIT_NEW_PCONST (cfg, ins, rt);
12254 ins->type = STACK_OBJ;
12255 ins->klass = cmethod->klass;
12258 MonoInst *addr, *vtvar;
12260 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12262 if (context_used) {
12263 if (handle_class == mono_defaults.typehandle_class) {
12264 ins = emit_get_rgctx_klass (cfg, context_used,
12265 mono_class_from_mono_type ((MonoType *)handle),
12266 MONO_RGCTX_INFO_TYPE);
12267 } else if (handle_class == mono_defaults.methodhandle_class) {
12268 ins = emit_get_rgctx_method (cfg, context_used,
12269 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12270 } else if (handle_class == mono_defaults.fieldhandle_class) {
12271 ins = emit_get_rgctx_field (cfg, context_used,
12272 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12274 g_assert_not_reached ();
12276 } else if (cfg->compile_aot) {
12277 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12279 EMIT_NEW_PCONST (cfg, ins, handle);
12281 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12282 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12283 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12293 MONO_INST_NEW (cfg, ins, OP_THROW);
12295 ins->sreg1 = sp [0]->dreg;
12297 cfg->cbb->out_of_line = TRUE;
12298 MONO_ADD_INS (cfg->cbb, ins);
12299 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12300 MONO_ADD_INS (cfg->cbb, ins);
12303 link_bblock (cfg, cfg->cbb, end_bblock);
12304 start_new_bblock = 1;
12305 /* This can complicate code generation for llvm since the return value might not be defined */
12306 if (COMPILE_LLVM (cfg))
12307 INLINE_FAILURE ("throw");
12309 case CEE_ENDFINALLY:
12310 /* mono_save_seq_point_info () depends on this */
12311 if (sp != stack_start)
12312 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12313 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12314 MONO_ADD_INS (cfg->cbb, ins);
12316 start_new_bblock = 1;
12319 * Control will leave the method so empty the stack, otherwise
12320 * the next basic block will start with a nonempty stack.
12322 while (sp != stack_start) {
12327 case CEE_LEAVE_S: {
12330 if (*ip == CEE_LEAVE) {
12332 target = ip + 5 + (gint32)read32(ip + 1);
12335 target = ip + 2 + (signed char)(ip [1]);
12338 /* empty the stack */
12339 while (sp != stack_start) {
12344 * If this leave statement is in a catch block, check for a
12345 * pending exception, and rethrow it if necessary.
12346 * We avoid doing this in runtime invoke wrappers, since those are called
12347 			 * by native code which expects the wrapper to catch all exceptions.
12349 for (i = 0; i < header->num_clauses; ++i) {
12350 MonoExceptionClause *clause = &header->clauses [i];
12353 * Use <= in the final comparison to handle clauses with multiple
12354 * leave statements, like in bug #78024.
12355 * The ordering of the exception clauses guarantees that we find the
12356 * innermost clause.
12358 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12360 MonoBasicBlock *dont_throw;
12365 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12368 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12370 NEW_BBLOCK (cfg, dont_throw);
12373 * Currently, we always rethrow the abort exception, despite the
12374 * fact that this is not correct. See thread6.cs for an example.
12375 * But propagating the abort exception is more important than
12376 				 * getting the semantics right.
12378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12380 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12382 MONO_START_BB (cfg, dont_throw);
12387 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12390 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12392 MonoExceptionClause *clause;
12394 for (tmp = handlers; tmp; tmp = tmp->next) {
12395 clause = (MonoExceptionClause *)tmp->data;
12396 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12398 link_bblock (cfg, cfg->cbb, tblock);
12399 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12400 ins->inst_target_bb = tblock;
12401 ins->inst_eh_block = clause;
12402 MONO_ADD_INS (cfg->cbb, ins);
12403 cfg->cbb->has_call_handler = 1;
12404 if (COMPILE_LLVM (cfg)) {
12405 MonoBasicBlock *target_bb;
12408 * Link the finally bblock with the target, since it will
12409 * conceptually branch there.
12411 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12412 GET_BBLOCK (cfg, target_bb, target);
12413 link_bblock (cfg, tblock, target_bb);
12416 g_list_free (handlers);
12419 MONO_INST_NEW (cfg, ins, OP_BR);
12420 MONO_ADD_INS (cfg->cbb, ins);
12421 GET_BBLOCK (cfg, tblock, target);
12422 link_bblock (cfg, cfg->cbb, tblock);
12423 ins->inst_target_bb = tblock;
12425 start_new_bblock = 1;
12427 if (*ip == CEE_LEAVE)
12436 * Mono specific opcodes
12438 case MONO_CUSTOM_PREFIX: {
12440 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12444 case CEE_MONO_ICALL: {
12446 MonoJitICallInfo *info;
12448 token = read32 (ip + 2);
12449 func = mono_method_get_wrapper_data (method, token);
12450 info = mono_find_jit_icall_by_addr (func);
12452 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12455 CHECK_STACK (info->sig->param_count);
12456 sp -= info->sig->param_count;
12458 ins = mono_emit_jit_icall (cfg, info->func, sp);
12459 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12463 inline_costs += 10 * num_calls++;
12467 case CEE_MONO_LDPTR_CARD_TABLE:
12468 case CEE_MONO_LDPTR_NURSERY_START:
12469 case CEE_MONO_LDPTR_NURSERY_BITS:
12470 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12471 CHECK_STACK_OVF (1);
12474 case CEE_MONO_LDPTR_CARD_TABLE:
12475 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12477 case CEE_MONO_LDPTR_NURSERY_START:
12478 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12480 case CEE_MONO_LDPTR_NURSERY_BITS:
12481 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12483 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12484 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12490 inline_costs += 10 * num_calls++;
12493 case CEE_MONO_LDPTR: {
12496 CHECK_STACK_OVF (1);
12498 token = read32 (ip + 2);
12500 ptr = mono_method_get_wrapper_data (method, token);
12501 EMIT_NEW_PCONST (cfg, ins, ptr);
12504 inline_costs += 10 * num_calls++;
12505 /* Can't embed random pointers into AOT code */
12509 case CEE_MONO_JIT_ICALL_ADDR: {
12510 MonoJitICallInfo *callinfo;
12513 CHECK_STACK_OVF (1);
12515 token = read32 (ip + 2);
12517 ptr = mono_method_get_wrapper_data (method, token);
12518 callinfo = mono_find_jit_icall_by_addr (ptr);
12519 g_assert (callinfo);
12520 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12523 inline_costs += 10 * num_calls++;
12526 case CEE_MONO_ICALL_ADDR: {
12527 MonoMethod *cmethod;
12530 CHECK_STACK_OVF (1);
12532 token = read32 (ip + 2);
12534 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12536 if (cfg->compile_aot) {
12537 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12539 ptr = mono_lookup_internal_call (cmethod);
12541 EMIT_NEW_PCONST (cfg, ins, ptr);
12547 case CEE_MONO_VTADDR: {
12548 MonoInst *src_var, *src;
12554 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12555 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12560 case CEE_MONO_NEWOBJ: {
12561 MonoInst *iargs [2];
12563 CHECK_STACK_OVF (1);
12565 token = read32 (ip + 2);
12566 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12567 mono_class_init (klass);
12568 NEW_DOMAINCONST (cfg, iargs [0]);
12569 MONO_ADD_INS (cfg->cbb, iargs [0]);
12570 NEW_CLASSCONST (cfg, iargs [1], klass);
12571 MONO_ADD_INS (cfg->cbb, iargs [1]);
12572 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12574 inline_costs += 10 * num_calls++;
12577 case CEE_MONO_OBJADDR:
12580 MONO_INST_NEW (cfg, ins, OP_MOVE);
12581 ins->dreg = alloc_ireg_mp (cfg);
12582 ins->sreg1 = sp [0]->dreg;
12583 ins->type = STACK_MP;
12584 MONO_ADD_INS (cfg->cbb, ins);
12588 case CEE_MONO_LDNATIVEOBJ:
12590 * Similar to LDOBJ, but instead load the unmanaged
12591 * representation of the vtype to the stack.
12596 token = read32 (ip + 2);
12597 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12598 g_assert (klass->valuetype);
12599 mono_class_init (klass);
12602 MonoInst *src, *dest, *temp;
12605 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12606 temp->backend.is_pinvoke = 1;
12607 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12608 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12610 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12611 dest->type = STACK_VTYPE;
12612 dest->klass = klass;
12618 case CEE_MONO_RETOBJ: {
12620 * Same as RET, but return the native representation of a vtype
12623 g_assert (cfg->ret);
12624 g_assert (mono_method_signature (method)->pinvoke);
12629 token = read32 (ip + 2);
12630 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12632 if (!cfg->vret_addr) {
12633 g_assert (cfg->ret_var_is_local);
12635 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12637 EMIT_NEW_RETLOADA (cfg, ins);
12639 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12641 if (sp != stack_start)
12644 MONO_INST_NEW (cfg, ins, OP_BR);
12645 ins->inst_target_bb = end_bblock;
12646 MONO_ADD_INS (cfg->cbb, ins);
12647 link_bblock (cfg, cfg->cbb, end_bblock);
12648 start_new_bblock = 1;
12652 case CEE_MONO_CISINST:
12653 case CEE_MONO_CCASTCLASS: {
12658 token = read32 (ip + 2);
12659 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12660 if (ip [1] == CEE_MONO_CISINST)
12661 ins = handle_cisinst (cfg, klass, sp [0]);
12663 ins = handle_ccastclass (cfg, klass, sp [0]);
12668 case CEE_MONO_SAVE_LMF:
12669 case CEE_MONO_RESTORE_LMF:
12672 case CEE_MONO_CLASSCONST:
12673 CHECK_STACK_OVF (1);
12675 token = read32 (ip + 2);
12676 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12679 inline_costs += 10 * num_calls++;
12681 case CEE_MONO_NOT_TAKEN:
12682 cfg->cbb->out_of_line = TRUE;
12685 case CEE_MONO_TLS: {
12688 CHECK_STACK_OVF (1);
12690 key = (MonoTlsKey)read32 (ip + 2);
12691 g_assert (key < TLS_KEY_NUM);
12693 ins = mono_create_tls_get (cfg, key);
12695 if (cfg->compile_aot) {
12697 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12698 ins->dreg = alloc_preg (cfg);
12699 ins->type = STACK_PTR;
12701 g_assert_not_reached ();
12704 ins->type = STACK_PTR;
12705 MONO_ADD_INS (cfg->cbb, ins);
12710 case CEE_MONO_DYN_CALL: {
12711 MonoCallInst *call;
12713 /* It would be easier to call a trampoline, but that would put an
12714 * extra frame on the stack, confusing exception handling. So
12715 * implement it inline using an opcode for now.
12718 if (!cfg->dyn_call_var) {
12719 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12720 /* prevent it from being register allocated */
12721 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12724 			/* Has to use a call inst since the local regalloc expects it */
12725 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12726 ins = (MonoInst*)call;
12728 ins->sreg1 = sp [0]->dreg;
12729 ins->sreg2 = sp [1]->dreg;
12730 MONO_ADD_INS (cfg->cbb, ins);
12732 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12735 inline_costs += 10 * num_calls++;
12739 case CEE_MONO_MEMORY_BARRIER: {
12741 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12745 case CEE_MONO_JIT_ATTACH: {
12746 MonoInst *args [16], *domain_ins;
12747 MonoInst *ad_ins, *jit_tls_ins;
12748 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12750 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12752 EMIT_NEW_PCONST (cfg, ins, NULL);
12753 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12755 ad_ins = mono_get_domain_intrinsic (cfg);
12756 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12758 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12759 NEW_BBLOCK (cfg, next_bb);
12760 NEW_BBLOCK (cfg, call_bb);
12762 if (cfg->compile_aot) {
12763 /* AOT code is only used in the root domain */
12764 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12766 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12768 MONO_ADD_INS (cfg->cbb, ad_ins);
12769 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12772 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12776 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12777 MONO_START_BB (cfg, call_bb);
12780 if (cfg->compile_aot) {
12781 /* AOT code is only used in the root domain */
12782 EMIT_NEW_PCONST (cfg, args [0], NULL);
12784 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12786 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12787 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12790 MONO_START_BB (cfg, next_bb);
12794 case CEE_MONO_JIT_DETACH: {
12795 MonoInst *args [16];
12797 /* Restore the original domain */
12798 dreg = alloc_ireg (cfg);
12799 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12800 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12804 case CEE_MONO_CALLI_EXTRA_ARG: {
12806 MonoMethodSignature *fsig;
12810 * This is the same as CEE_CALLI, but passes an additional argument
12811 * to the called method in llvmonly mode.
12812 * This is only used by delegate invoke wrappers to call the
12813 * actual delegate method.
12815 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12818 token = read32 (ip + 2);
12826 fsig = mini_get_signature (method, token, generic_context);
12828 if (cfg->llvm_only)
12829 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12831 n = fsig->param_count + fsig->hasthis + 1;
12838 if (cfg->llvm_only) {
12840 				 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12841 * cconv. This is set by mono_init_delegate ().
12843 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12844 MonoInst *callee = addr;
12845 MonoInst *call, *localloc_ins;
12846 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12847 int low_bit_reg = alloc_preg (cfg);
12849 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12850 NEW_BBLOCK (cfg, end_bb);
12852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12854 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12856 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12857 addr = emit_get_rgctx_sig (cfg, context_used,
12858 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12860 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12862 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12863 ins->dreg = alloc_preg (cfg);
12864 ins->inst_imm = 2 * SIZEOF_VOID_P;
12865 MONO_ADD_INS (cfg->cbb, ins);
12866 localloc_ins = ins;
12867 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12868 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12871 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12872 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12874 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12875 MONO_START_BB (cfg, is_gsharedvt_bb);
12876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12877 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12878 ins->dreg = call->dreg;
12880 MONO_START_BB (cfg, end_bb);
12882 /* Caller uses a normal calling conv */
12884 MonoInst *callee = addr;
12885 MonoInst *call, *localloc_ins;
12886 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12887 int low_bit_reg = alloc_preg (cfg);
12889 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12890 NEW_BBLOCK (cfg, end_bb);
12892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12894 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12896 /* Normal case: callee uses a normal cconv, no conversion is needed */
12897 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12899 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12900 MONO_START_BB (cfg, is_gsharedvt_bb);
12901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12902 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12903 MONO_ADD_INS (cfg->cbb, addr);
12905 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12907 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12908 ins->dreg = alloc_preg (cfg);
12909 ins->inst_imm = 2 * SIZEOF_VOID_P;
12910 MONO_ADD_INS (cfg->cbb, ins);
12911 localloc_ins = ins;
12912 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12916 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12917 ins->dreg = call->dreg;
12918 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12920 MONO_START_BB (cfg, end_bb);
12923 /* Same as CEE_CALLI */
12924 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12926 * We pass the address to the gsharedvt trampoline in the rgctx reg
12928 MonoInst *callee = addr;
12930 addr = emit_get_rgctx_sig (cfg, context_used,
12931 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12932 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12934 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12938 if (!MONO_TYPE_IS_VOID (fsig->ret))
12939 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12941 CHECK_CFG_EXCEPTION;
12945 constrained_class = NULL;
12949 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12955 case CEE_PREFIX1: {
12958 case CEE_ARGLIST: {
12959 /* somewhat similar to LDTOKEN */
12960 MonoInst *addr, *vtvar;
12961 CHECK_STACK_OVF (1);
12962 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12964 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12965 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12967 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12968 ins->type = STACK_VTYPE;
12969 ins->klass = mono_defaults.argumenthandle_class;
12979 MonoInst *cmp, *arg1, *arg2;
12987 * The following transforms:
12988 * CEE_CEQ into OP_CEQ
12989 * CEE_CGT into OP_CGT
12990 * CEE_CGT_UN into OP_CGT_UN
12991 * CEE_CLT into OP_CLT
12992 * CEE_CLT_UN into OP_CLT_UN
12994 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12996 MONO_INST_NEW (cfg, ins, cmp->opcode);
12997 cmp->sreg1 = arg1->dreg;
12998 cmp->sreg2 = arg2->dreg;
12999 type_from_op (cfg, cmp, arg1, arg2);
13001 add_widen_op (cfg, cmp, &arg1, &arg2);
13002 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13003 cmp->opcode = OP_LCOMPARE;
13004 else if (arg1->type == STACK_R4)
13005 cmp->opcode = OP_RCOMPARE;
13006 else if (arg1->type == STACK_R8)
13007 cmp->opcode = OP_FCOMPARE;
13009 cmp->opcode = OP_ICOMPARE;
13010 MONO_ADD_INS (cfg->cbb, cmp);
13011 ins->type = STACK_I4;
13012 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13013 type_from_op (cfg, ins, arg1, arg2);
13015 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13017 * The backends expect the fceq opcodes to do the
13020 ins->sreg1 = cmp->sreg1;
13021 ins->sreg2 = cmp->sreg2;
13024 MONO_ADD_INS (cfg->cbb, ins);
13030 MonoInst *argconst;
13031 MonoMethod *cil_method;
13033 CHECK_STACK_OVF (1);
13035 n = read32 (ip + 2);
13036 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13039 mono_class_init (cmethod->klass);
13041 mono_save_token_info (cfg, image, n, cmethod);
13043 context_used = mini_method_check_context_used (cfg, cmethod);
13045 cil_method = cmethod;
13046 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13047 METHOD_ACCESS_FAILURE (method, cil_method);
13049 if (mono_security_core_clr_enabled ())
13050 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13053 * Optimize the common case of ldftn+delegate creation
13055 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13056 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13057 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13058 MonoInst *target_ins, *handle_ins;
13059 MonoMethod *invoke;
13060 int invoke_context_used;
13062 invoke = mono_get_delegate_invoke (ctor_method->klass);
13063 if (!invoke || !mono_method_signature (invoke))
13066 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13068 target_ins = sp [-1];
13070 if (mono_security_core_clr_enabled ())
13071 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13073 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13074 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13075 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13077 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13081 /* FIXME: SGEN support */
13082 if (invoke_context_used == 0 || cfg->llvm_only) {
13084 if (cfg->verbose_level > 3)
13085 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13086 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13089 CHECK_CFG_EXCEPTION;
13099 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13100 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13104 inline_costs += 10 * num_calls++;
13107 case CEE_LDVIRTFTN: {
13108 MonoInst *args [2];
13112 n = read32 (ip + 2);
13113 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13116 mono_class_init (cmethod->klass);
13118 context_used = mini_method_check_context_used (cfg, cmethod);
13120 if (mono_security_core_clr_enabled ())
13121 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13124 * Optimize the common case of ldvirtftn+delegate creation
13126 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13127 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13128 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13129 MonoInst *target_ins, *handle_ins;
13130 MonoMethod *invoke;
13131 int invoke_context_used;
13132 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13134 invoke = mono_get_delegate_invoke (ctor_method->klass);
13135 if (!invoke || !mono_method_signature (invoke))
13138 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13140 target_ins = sp [-1];
13142 if (mono_security_core_clr_enabled ())
13143 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13145 /* FIXME: SGEN support */
13146 if (invoke_context_used == 0 || cfg->llvm_only) {
13148 if (cfg->verbose_level > 3)
13149 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13150 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13153 CHECK_CFG_EXCEPTION;
13166 args [1] = emit_get_rgctx_method (cfg, context_used,
13167 cmethod, MONO_RGCTX_INFO_METHOD);
13170 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13172 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13175 inline_costs += 10 * num_calls++;
13179 CHECK_STACK_OVF (1);
13181 n = read16 (ip + 2);
13183 EMIT_NEW_ARGLOAD (cfg, ins, n);
13188 CHECK_STACK_OVF (1);
13190 n = read16 (ip + 2);
13192 NEW_ARGLOADA (cfg, ins, n);
13193 MONO_ADD_INS (cfg->cbb, ins);
13201 n = read16 (ip + 2);
13203 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13205 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13209 CHECK_STACK_OVF (1);
13211 n = read16 (ip + 2);
13213 EMIT_NEW_LOCLOAD (cfg, ins, n);
13218 unsigned char *tmp_ip;
13219 CHECK_STACK_OVF (1);
13221 n = read16 (ip + 2);
13224 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13230 EMIT_NEW_LOCLOADA (cfg, ins, n);
13239 n = read16 (ip + 2);
13241 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13243 emit_stloc_ir (cfg, sp, header, n);
13250 if (sp != stack_start)
13252 if (cfg->method != method)
13254 * Inlining this into a loop in a parent could lead to
13255 * stack overflows which is different behavior than the
13256 * non-inlined case, thus disable inlining in this case.
13258 INLINE_FAILURE("localloc");
13260 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13261 ins->dreg = alloc_preg (cfg);
13262 ins->sreg1 = sp [0]->dreg;
13263 ins->type = STACK_PTR;
13264 MONO_ADD_INS (cfg->cbb, ins);
13266 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13268 ins->flags |= MONO_INST_INIT;
13273 case CEE_ENDFILTER: {
13274 MonoExceptionClause *clause, *nearest;
13279 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13281 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13282 ins->sreg1 = (*sp)->dreg;
13283 MONO_ADD_INS (cfg->cbb, ins);
13284 start_new_bblock = 1;
13288 for (cc = 0; cc < header->num_clauses; ++cc) {
13289 clause = &header->clauses [cc];
13290 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13291 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13292 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13295 g_assert (nearest);
13296 if ((ip - header->code) != nearest->handler_offset)
13301 case CEE_UNALIGNED_:
13302 ins_flag |= MONO_INST_UNALIGNED;
13303 /* FIXME: record alignment? we can assume 1 for now */
13307 case CEE_VOLATILE_:
13308 ins_flag |= MONO_INST_VOLATILE;
13312 ins_flag |= MONO_INST_TAILCALL;
13313 cfg->flags |= MONO_CFG_HAS_TAIL;
13314 /* Can't inline tail calls at this time */
13315 inline_costs += 100000;
13322 token = read32 (ip + 2);
13323 klass = mini_get_class (method, token, generic_context);
13324 CHECK_TYPELOAD (klass);
13325 if (generic_class_is_reference_type (cfg, klass))
13326 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13328 mini_emit_initobj (cfg, *sp, NULL, klass);
13332 case CEE_CONSTRAINED_:
13334 token = read32 (ip + 2);
13335 constrained_class = mini_get_class (method, token, generic_context);
13336 CHECK_TYPELOAD (constrained_class);
13340 case CEE_INITBLK: {
13341 MonoInst *iargs [3];
13345 /* Skip optimized paths for volatile operations. */
13346 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13347 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13348 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13349 /* emit_memset only works when val == 0 */
13350 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13353 iargs [0] = sp [0];
13354 iargs [1] = sp [1];
13355 iargs [2] = sp [2];
13356 if (ip [1] == CEE_CPBLK) {
13358 * FIXME: It's unclear whether we should be emitting both the acquire
13359 * and release barriers for cpblk. It is technically both a load and
13360 * store operation, so it seems like that's the sensible thing to do.
13362 * FIXME: We emit full barriers on both sides of the operation for
13363 * simplicity. We should have a separate atomic memcpy method instead.
13365 MonoMethod *memcpy_method = get_memcpy_method ();
13367 if (ins_flag & MONO_INST_VOLATILE)
13368 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13370 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13371 call->flags |= ins_flag;
13373 if (ins_flag & MONO_INST_VOLATILE)
13374 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13376 MonoMethod *memset_method = get_memset_method ();
13377 if (ins_flag & MONO_INST_VOLATILE) {
13378 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13379 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13381 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13382 call->flags |= ins_flag;
13393 ins_flag |= MONO_INST_NOTYPECHECK;
13395 ins_flag |= MONO_INST_NORANGECHECK;
13396 /* we ignore the no-nullcheck for now since we
13397 * really do it explicitly only when doing callvirt->call
13401 case CEE_RETHROW: {
13403 int handler_offset = -1;
13405 for (i = 0; i < header->num_clauses; ++i) {
13406 MonoExceptionClause *clause = &header->clauses [i];
13407 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13408 handler_offset = clause->handler_offset;
13413 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13415 if (handler_offset == -1)
13418 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13419 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13420 ins->sreg1 = load->dreg;
13421 MONO_ADD_INS (cfg->cbb, ins);
13423 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13424 MONO_ADD_INS (cfg->cbb, ins);
13427 link_bblock (cfg, cfg->cbb, end_bblock);
13428 start_new_bblock = 1;
13436 CHECK_STACK_OVF (1);
13438 token = read32 (ip + 2);
13439 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13440 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13443 val = mono_type_size (type, &ialign);
13445 MonoClass *klass = mini_get_class (method, token, generic_context);
13446 CHECK_TYPELOAD (klass);
13448 val = mono_type_size (&klass->byval_arg, &ialign);
13450 if (mini_is_gsharedvt_klass (klass))
13451 GSHAREDVT_FAILURE (*ip);
13453 EMIT_NEW_ICONST (cfg, ins, val);
13458 case CEE_REFANYTYPE: {
13459 MonoInst *src_var, *src;
13461 GSHAREDVT_FAILURE (*ip);
13467 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13469 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13470 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13471 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13476 case CEE_READONLY_:
13489 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13499 g_warning ("opcode 0x%02x not handled", *ip);
13503 if (start_new_bblock != 1)
13506 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13507 if (cfg->cbb->next_bb) {
13508 /* This could already be set because of inlining, #693905 */
13509 MonoBasicBlock *bb = cfg->cbb;
13511 while (bb->next_bb)
13513 bb->next_bb = end_bblock;
13515 cfg->cbb->next_bb = end_bblock;
13518 if (cfg->method == method && cfg->domainvar) {
13520 MonoInst *get_domain;
13522 cfg->cbb = init_localsbb;
13524 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13525 MONO_ADD_INS (cfg->cbb, get_domain);
13527 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13529 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13530 MONO_ADD_INS (cfg->cbb, store);
13533 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13534 if (cfg->compile_aot)
13535 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13536 mono_get_got_var (cfg);
13539 if (cfg->method == method && cfg->got_var)
13540 mono_emit_load_got_addr (cfg);
13542 if (init_localsbb) {
13543 cfg->cbb = init_localsbb;
13545 for (i = 0; i < header->num_locals; ++i) {
13546 emit_init_local (cfg, i, header->locals [i], init_locals);
13550 if (cfg->init_ref_vars && cfg->method == method) {
13551 /* Emit initialization for ref vars */
13552 // FIXME: Avoid duplication initialization for IL locals.
13553 for (i = 0; i < cfg->num_varinfo; ++i) {
13554 MonoInst *ins = cfg->varinfo [i];
13556 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13557 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13561 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13562 cfg->cbb = init_localsbb;
13563 emit_push_lmf (cfg);
13566 cfg->cbb = init_localsbb;
13567 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13570 MonoBasicBlock *bb;
13573 * Make seq points at backward branch targets interruptable.
13575 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13576 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13577 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13580 /* Add a sequence point for method entry/exit events */
13581 if (seq_points && cfg->gen_sdb_seq_points) {
13582 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13583 MONO_ADD_INS (init_localsbb, ins);
13584 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13585 MONO_ADD_INS (cfg->bb_exit, ins);
13589 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13590 * the code they refer to was dead (#11880).
13592 if (sym_seq_points) {
13593 for (i = 0; i < header->code_size; ++i) {
13594 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13597 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13598 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13605 if (cfg->method == method) {
13606 MonoBasicBlock *bb;
13607 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13608 bb->region = mono_find_block_region (cfg, bb->real_offset);
13610 mono_create_spvar_for_region (cfg, bb->region);
13611 if (cfg->verbose_level > 2)
13612 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13616 if (inline_costs < 0) {
13619 /* Method is too large */
13620 mname = mono_method_full_name (method, TRUE);
13621 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13625 if ((cfg->verbose_level > 2) && (cfg->method == method))
13626 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13631 g_assert (!mono_error_ok (&cfg->error));
13635 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13639 set_exception_type_from_invalid_il (cfg, method, ip);
13643 g_slist_free (class_inits);
13644 mono_basic_block_free (original_bb);
13645 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13646 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13647 if (cfg->exception_type)
13650 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode (store a register into
 * [basereg + offset]) to the width-equivalent OP_STORE*_MEMBASE_IMM opcode,
 * which stores an immediate instead, so a constant store does not need the
 * value materialized in a register first.
 *
 * NOTE(review): interior lines (return type, `switch (opcode)` header,
 * `default:` label, closing braces) are not visible in this extraction —
 * confirm against the full file before editing. Any unhandled opcode
 * apparently falls through to g_assert_not_reached ().
 */
13654 store_membase_reg_to_store_membase_imm (int opcode)
13657 case OP_STORE_MEMBASE_REG:
13658 return OP_STORE_MEMBASE_IMM;
13659 case OP_STOREI1_MEMBASE_REG:
13660 return OP_STOREI1_MEMBASE_IMM;
13661 case OP_STOREI2_MEMBASE_REG:
13662 return OP_STOREI2_MEMBASE_IMM;
13663 case OP_STOREI4_MEMBASE_REG:
13664 return OP_STOREI4_MEMBASE_IMM;
13665 case OP_STOREI8_MEMBASE_REG:
13666 return OP_STOREI8_MEMBASE_IMM;
/* unhandled store opcode */
13668 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/call opcode to its register+immediate
 * variant (e.g. integer add -> OP_IADD_IMM, long shift -> OP_LSHL_IMM), used
 * by the local optimizer to fold constant operands directly into the
 * instruction. Arch-specific x86/amd64 opcodes and OP_LOCALLOC get mapped
 * under the corresponding TARGET_* guards.
 *
 * NOTE(review): most `case` labels and the default branch are missing from
 * this extraction — only the return statements survived; the `#if` blocks
 * are also missing their `#endif`s here. Confirm against the full file.
 */
13675 mono_op_to_op_imm (int opcode)
13679 return OP_IADD_IMM;
13681 return OP_ISUB_IMM;
13683 return OP_IDIV_IMM;
13685 return OP_IDIV_UN_IMM;
13687 return OP_IREM_IMM;
13689 return OP_IREM_UN_IMM;
13691 return OP_IMUL_IMM;
13693 return OP_IAND_IMM;
13697 return OP_IXOR_IMM;
13699 return OP_ISHL_IMM;
13701 return OP_ISHR_IMM;
13703 return OP_ISHR_UN_IMM;
13706 return OP_LADD_IMM;
13708 return OP_LSUB_IMM;
13710 return OP_LAND_IMM;
13714 return OP_LXOR_IMM;
13716 return OP_LSHL_IMM;
13718 return OP_LSHR_IMM;
13720 return OP_LSHR_UN_IMM;
/* 64-bit long remainder by immediate only exists on 64-bit registers */
13721 #if SIZEOF_REGISTER == 8
13723 return OP_LREM_IMM;
13727 return OP_COMPARE_IMM;
13729 return OP_ICOMPARE_IMM;
13731 return OP_LCOMPARE_IMM;
13733 case OP_STORE_MEMBASE_REG:
13734 return OP_STORE_MEMBASE_IMM;
13735 case OP_STOREI1_MEMBASE_REG:
13736 return OP_STOREI1_MEMBASE_IMM;
13737 case OP_STOREI2_MEMBASE_REG:
13738 return OP_STOREI2_MEMBASE_IMM;
13739 case OP_STOREI4_MEMBASE_REG:
13740 return OP_STOREI4_MEMBASE_IMM;
13742 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13744 return OP_X86_PUSH_IMM;
13745 case OP_X86_COMPARE_MEMBASE_REG:
13746 return OP_X86_COMPARE_MEMBASE_IMM;
13748 #if defined(TARGET_AMD64)
13749 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13750 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect call through a known-constant target degrades to a direct call */
13752 case OP_VOIDCALL_REG:
13753 return OP_VOIDCALL;
13761 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the IR
 * OP_LOAD*_MEMBASE opcode of the same width/signedness. Both the
 * native-int and object-reference loads map to the pointer-sized
 * OP_LOAD_MEMBASE.
 *
 * NOTE(review): several `case CEE_LDIND_*` labels, the switch header and
 * the default label are missing from this extraction — confirm against
 * the full file.
 */
13768 ldind_to_load_membase (int opcode)
13772 return OP_LOADI1_MEMBASE;
13774 return OP_LOADU1_MEMBASE;
13776 return OP_LOADI2_MEMBASE;
13778 return OP_LOADU2_MEMBASE;
13780 return OP_LOADI4_MEMBASE;
13782 return OP_LOADU4_MEMBASE;
13784 return OP_LOAD_MEMBASE;
13785 case CEE_LDIND_REF:
13786 return OP_LOAD_MEMBASE;
13788 return OP_LOADI8_MEMBASE;
13790 return OP_LOADR4_MEMBASE;
13792 return OP_LOADR8_MEMBASE;
/* unhandled ldind opcode */
13794 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the IR
 * OP_STORE*_MEMBASE_REG opcode of the same width. Object-reference
 * stores use the pointer-sized OP_STORE_MEMBASE_REG.
 *
 * NOTE(review): most `case CEE_STIND_*` labels and the switch/default
 * lines are missing from this extraction — confirm against the full file.
 */
13801 stind_to_store_membase (int opcode)
13805 return OP_STOREI1_MEMBASE_REG;
13807 return OP_STOREI2_MEMBASE_REG;
13809 return OP_STOREI4_MEMBASE_REG;
13811 case CEE_STIND_REF:
13812 return OP_STORE_MEMBASE_REG;
13814 return OP_STOREI8_MEMBASE_REG;
13816 return OP_STORER4_MEMBASE_REG;
13818 return OP_STORER8_MEMBASE_REG;
/* unhandled stind opcode */
13820 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (load from [basereg + offset]) opcode to the
 * corresponding OP_LOAD*_MEM opcode, which loads from an absolute address.
 * Only x86/amd64 support absolute-address loads; the 64-bit load is
 * additionally gated on SIZEOF_REGISTER == 8.
 *
 * NOTE(review): the switch header, default return and `#endif`s are not
 * visible in this extraction — presumably the fallback returns -1 for
 * "no mapping"; confirm against the full file.
 */
13827 mono_load_membase_to_load_mem (int opcode)
13829 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13830 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13832 case OP_LOAD_MEMBASE:
13833 return OP_LOAD_MEM;
13834 case OP_LOADU1_MEMBASE:
13835 return OP_LOADU1_MEM;
13836 case OP_LOADU2_MEMBASE:
13837 return OP_LOADU2_MEM;
13838 case OP_LOADI4_MEMBASE:
13839 return OP_LOADI4_MEM;
13840 case OP_LOADU4_MEMBASE:
13841 return OP_LOADU4_MEM;
13842 #if SIZEOF_REGISTER == 8
13843 case OP_LOADI8_MEMBASE:
13844 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination would otherwise be stored with
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode that performs
 * the operation directly on [basereg + offset] (e.g. `add [mem], reg`),
 * letting the spill pass fuse a load+op+store into one instruction.
 * On x86 only pointer/int32-sized stores qualify; amd64 additionally
 * accepts int64 stores and has separate OP_AMD64_* forms for the 64-bit
 * wide operations.
 *
 * NOTE(review): the `case` labels for the individual ALU opcodes, the
 * early `return -1` bodies and the #endif/default lines are missing from
 * this extraction — confirm against the full file.
 */
13853 op_to_op_dest_membase (int store_opcode, int opcode)
13855 #if defined(TARGET_X86)
/* only pointer-sized / int32 destinations can be fused on x86 */
13856 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13861 return OP_X86_ADD_MEMBASE_REG;
13863 return OP_X86_SUB_MEMBASE_REG;
13865 return OP_X86_AND_MEMBASE_REG;
13867 return OP_X86_OR_MEMBASE_REG;
13869 return OP_X86_XOR_MEMBASE_REG;
13872 return OP_X86_ADD_MEMBASE_IMM;
13875 return OP_X86_SUB_MEMBASE_IMM;
13878 return OP_X86_AND_MEMBASE_IMM;
13881 return OP_X86_OR_MEMBASE_IMM;
13884 return OP_X86_XOR_MEMBASE_IMM;
13890 #if defined(TARGET_AMD64)
/* amd64 can also fuse 64-bit destinations */
13891 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13896 return OP_X86_ADD_MEMBASE_REG;
13898 return OP_X86_SUB_MEMBASE_REG;
13900 return OP_X86_AND_MEMBASE_REG;
13902 return OP_X86_OR_MEMBASE_REG;
13904 return OP_X86_XOR_MEMBASE_REG;
13906 return OP_X86_ADD_MEMBASE_IMM;
13908 return OP_X86_SUB_MEMBASE_IMM;
13910 return OP_X86_AND_MEMBASE_IMM;
13912 return OP_X86_OR_MEMBASE_IMM;
13914 return OP_X86_XOR_MEMBASE_IMM;
13916 return OP_AMD64_ADD_MEMBASE_REG;
13918 return OP_AMD64_SUB_MEMBASE_REG;
13920 return OP_AMD64_AND_MEMBASE_REG;
13922 return OP_AMD64_OR_MEMBASE_REG;
13924 return OP_AMD64_XOR_MEMBASE_REG;
13927 return OP_AMD64_ADD_MEMBASE_IMM;
13930 return OP_AMD64_SUB_MEMBASE_IMM;
13933 return OP_AMD64_AND_MEMBASE_IMM;
13936 return OP_AMD64_OR_MEMBASE_IMM;
13939 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the byte store of its
 * result: on x86/amd64, a SETcc whose destination is stored with
 * OP_STOREI1_MEMBASE_REG becomes OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE, writing the flag byte straight to memory.
 *
 * NOTE(review): the `case` labels for the condition opcodes (presumably
 * the ceq/cne forms), the default return and `#endif` are missing from
 * this extraction — confirm against the full file.
 */
13949 op_to_op_store_membase (int store_opcode, int opcode)
13951 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13954 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13955 return OP_X86_SETEQ_MEMBASE;
13957 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13958 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand would otherwise be loaded
 * with LOAD_OPCODE, return the x86/amd64 opcode that reads that operand
 * directly from [basereg + offset] (push-from-memory, compare-with-memory),
 * fusing the load into the consumer. On amd64 the choice between the
 * 32-bit ICOMPARE and 64-bit COMPARE membase forms depends on the load
 * width and on cfg->backend->ilp32 (x32 ABI: pointer loads are 32-bit).
 *
 * NOTE(review): many `case` labels, the switch headers and the default
 * `return -1` paths are missing from this extraction; note also the
 * pre-existing FIXMEs about sign extension and the commented-out 64-bit
 * immediate-compare mapping. Confirm against the full file.
 */
13966 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13969 /* FIXME: This has sign extension issues */
13971 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13972 return OP_X86_COMPARE_MEMBASE8_IMM;
13975 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13980 return OP_X86_PUSH_MEMBASE;
13981 case OP_COMPARE_IMM:
13982 case OP_ICOMPARE_IMM:
13983 return OP_X86_COMPARE_MEMBASE_IMM;
13986 return OP_X86_COMPARE_MEMBASE_REG;
13990 #ifdef TARGET_AMD64
13991 /* FIXME: This has sign extension issues */
13993 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13994 return OP_X86_COMPARE_MEMBASE8_IMM;
13999 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14000 return OP_X86_PUSH_MEMBASE;
14002 /* FIXME: This only works for 32 bit immediates
14003 case OP_COMPARE_IMM:
14004 case OP_LCOMPARE_IMM:
14005 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14006 return OP_AMD64_COMPARE_MEMBASE_IMM;
14008 case OP_ICOMPARE_IMM:
14009 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14010 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
14014 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14015 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14016 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14017 return OP_AMD64_COMPARE_MEMBASE_REG;
14020 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14021 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but for an opcode's SECOND source operand:
 * return the x86/amd64 reg,[mem] form (compare/add/sub/and/or/xor with a
 * memory right-hand side) so the load feeding sreg2 can be fused away.
 * On amd64, 32-bit loads (and pointer loads under the ilp32/x32 ABI) pick
 * the ICOMPARE/X86_* 32-bit forms, while 64-bit loads pick the OP_AMD64_*
 * forms.
 *
 * NOTE(review): the per-opcode `case` labels, the x86 `#if` guard and the
 * default `return -1` paths are not visible in this extraction — confirm
 * against the full file.
 */
14030 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
14033 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14039 return OP_X86_COMPARE_REG_MEMBASE;
14041 return OP_X86_ADD_REG_MEMBASE;
14043 return OP_X86_SUB_REG_MEMBASE;
14045 return OP_X86_AND_REG_MEMBASE;
14047 return OP_X86_OR_REG_MEMBASE;
14049 return OP_X86_XOR_REG_MEMBASE;
14053 #ifdef TARGET_AMD64
14054 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14057 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14059 return OP_X86_ADD_REG_MEMBASE;
14061 return OP_X86_SUB_REG_MEMBASE;
14063 return OP_X86_AND_REG_MEMBASE;
14065 return OP_X86_OR_REG_MEMBASE;
14067 return OP_X86_XOR_REG_MEMBASE;
14069 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14073 return OP_AMD64_COMPARE_REG_MEMBASE;
14075 return OP_AMD64_ADD_REG_MEMBASE;
14077 return OP_AMD64_SUB_REG_MEMBASE;
14079 return OP_AMD64_AND_REG_MEMBASE;
14081 return OP_AMD64_OR_REG_MEMBASE;
14083 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Wrapper around mono_op_to_op_imm () that refuses to convert opcodes
 * whose immediate form is software-emulated on this architecture
 * (long shifts on 32-bit targets, and mul/div/rem where
 * MONO_ARCH_EMULATE_* is defined) — converting those would pessimize
 * codegen instead of helping.
 *
 * NOTE(review): the `case` labels and early-return bodies inside the
 * three `#if` arms are missing from this extraction — confirm against
 * the full file.
 */
14092 mono_op_to_op_imm_noemul (int opcode)
14095 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14101 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14108 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14113 return mono_op_to_op_imm (opcode);
14118 * mono_handle_global_vregs:
14120 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14124 mono_handle_global_vregs (MonoCompile *cfg)
14126 gint32 *vreg_to_bb;
14127 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] == 0: not seen yet; == block_num + 1: seen only in that
 * bblock; == -1: seen in multiple bblocks (global).
 * NOTE(review): `sizeof (gint32*)` below looks like it should be
 * `sizeof (gint32)`, and the `+ 1` binds after the multiply (adds one byte,
 * not one element). Over-allocates rather than under on 64-bit, so likely
 * benign, but confirm/fix upstream.
 */
14130 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14132 #ifdef MONO_ARCH_SIMD_INTRINSICS
14133 if (cfg->uses_simd_intrinsics)
14134 mono_simd_simplify_indirection (cfg);
14137 /* Find local vregs used in more than one bb */
14138 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14139 MonoInst *ins = bb->code;
14140 int block_num = bb->block_num;
14142 if (cfg->verbose_level > 2)
14143 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14146 for (; ins; ins = ins->next) {
14147 const char *spec = INS_INFO (ins->opcode);
14148 int regtype = 0, regindex;
14151 if (G_UNLIKELY (cfg->verbose_level > 2))
14152 mono_print_ins (ins);
/* by this point only machine IR opcodes should remain, no CIL opcodes */
14154 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dest, src1, src2, src3 of the instruction in turn */
14156 for (regindex = 0; regindex < 4; regindex ++) {
14159 if (regindex == 0) {
14160 regtype = spec [MONO_INST_DEST];
14161 if (regtype == ' ')
14164 } else if (regindex == 1) {
14165 regtype = spec [MONO_INST_SRC1];
14166 if (regtype == ' ')
14169 } else if (regindex == 2) {
14170 regtype = spec [MONO_INST_SRC2];
14171 if (regtype == ' ')
14174 } else if (regindex == 3) {
14175 regtype = spec [MONO_INST_SRC3];
14176 if (regtype == ' ')
14181 #if SIZEOF_REGISTER == 4
14182 /* In the LLVM case, the long opcodes are not decomposed */
14183 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14185 * Since some instructions reference the original long vreg,
14186 * and some reference the two component vregs, it is quite hard
14187 * to determine when it needs to be global. So be conservative.
14189 if (!get_vreg_to_inst (cfg, vreg)) {
14190 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14192 if (cfg->verbose_level > 2)
14193 printf ("LONG VREG R%d made global.\n", vreg);
14197 * Make the component vregs volatile since the optimizations can
14198 * get confused otherwise.
14200 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14201 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14205 g_assert (vreg != -1);
14207 prev_bb = vreg_to_bb [vreg];
14208 if (prev_bb == 0) {
14209 /* 0 is a valid block num */
14210 vreg_to_bb [vreg] = block_num + 1;
14211 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never turned into variables */
14212 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14215 if (!get_vreg_to_inst (cfg, vreg)) {
14216 if (G_UNLIKELY (cfg->verbose_level > 2))
14217 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable with a type matching the regtype/opcode */
14221 if (vreg_is_ref (cfg, vreg))
14222 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14224 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14227 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14230 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14233 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14236 g_assert_not_reached ();
14240 /* Flag as having been used in more than one bb */
14241 vreg_to_bb [vreg] = -1;
14247 /* If a variable is used in only one bblock, convert it into a local vreg */
14248 for (i = 0; i < cfg->num_varinfo; i++) {
14249 MonoInst *var = cfg->varinfo [i];
14250 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14252 switch (var->type) {
14258 #if SIZEOF_REGISTER == 8
14261 #if !defined(TARGET_X86)
14262 /* Enabling this screws up the fp stack on x86 */
14265 if (mono_arch_is_soft_float ())
14269 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14273 /* Arguments are implicitly global */
14274 /* Putting R4 vars into registers doesn't work currently */
14275 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14276 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14278 * Make that the variable's liveness interval doesn't contain a call, since
14279 * that would cause the lvreg to be spilled, making the whole optimization
14282 /* This is too slow for JIT compilation */
14284 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14286 int def_index, call_index, ins_index;
14287 gboolean spilled = FALSE;
14292 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14293 const char *spec = INS_INFO (ins->opcode);
14295 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14296 def_index = ins_index;
/*
 * NOTE(review): both halves of this condition test SRC1/sreg1 — the
 * second clause presumably was meant to test SRC2/sreg2. Confirm
 * against the full file; as written, uses through sreg2 are missed.
 */
14298 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14299 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14300 if (call_index > def_index) {
14306 if (MONO_IS_CALL (ins))
14307 call_index = ins_index;
14317 if (G_UNLIKELY (cfg->verbose_level > 2))
14318 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* mark the variable dead; the vreg survives as a plain local vreg */
14319 var->flags |= MONO_INST_IS_DEAD;
14320 cfg->vreg_to_inst [var->dreg] = NULL;
14327 * Compress the varinfo and vars tables so the liveness computation is faster and
14328 * takes up less space.
14331 for (i = 0; i < cfg->num_varinfo; ++i) {
14332 MonoInst *var = cfg->varinfo [i];
14333 if (pos < i && cfg->locals_start == i)
14334 cfg->locals_start = pos;
14335 if (!(var->flags & MONO_INST_IS_DEAD)) {
14337 cfg->varinfo [pos] = cfg->varinfo [i];
14338 cfg->varinfo [pos]->inst_c0 = pos;
14339 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14340 cfg->vars [pos].idx = pos;
14341 #if SIZEOF_REGISTER == 4
14342 if (cfg->varinfo [pos]->type == STACK_I8) {
14343 /* Modify the two component vars too */
14346 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14347 var1->inst_c0 = pos;
14348 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14349 var1->inst_c0 = pos;
14356 cfg->num_varinfo = pos;
14357 if (cfg->locals_start > cfg->num_varinfo)
14358 cfg->locals_start = cfg->num_varinfo;
14362 * mono_allocate_gsharedvt_vars:
14364 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14365 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14368 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/*
 * gsharedvt_vreg_to_idx encoding (mempool is zeroed): 0 = not a gsharedvt
 * var; idx + 1 = local with entries-array slot idx; -1 = gsharedvt argument
 * (passed by ref, resolved later via OP_GSHAREDVT_ARG_REGOFFSET).
 */
14372 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14374 for (i = 0; i < cfg->num_varinfo; ++i) {
14375 MonoInst *ins = cfg->varinfo [i];
14378 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* vars at or after locals_start are IL locals; earlier ones are arguments */
14379 if (i >= cfg->locals_start) {
14381 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14382 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14383 ins->opcode = OP_GSHAREDVT_LOCAL;
14384 ins->inst_imm = idx;
14387 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14388 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14395 * mono_spill_global_vars:
14397 * Generate spill code for variables which are not allocated to registers,
14398 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14399 * code is generated which could be optimized by the local optimization passes.
14402 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14404 MonoBasicBlock *bb;
14406 int orig_next_vreg;
14407 guint32 *vreg_to_lvreg;
14409 guint32 i, lvregs_len;
14410 gboolean dest_has_lvreg = FALSE;
14411 MonoStackType stacktypes [128];
14412 MonoInst **live_range_start, **live_range_end;
14413 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14415 *need_local_opts = FALSE;
14417 memset (spec2, 0, sizeof (spec2));
14419 /* FIXME: Move this function to mini.c */
14420 stacktypes ['i'] = STACK_PTR;
14421 stacktypes ['l'] = STACK_I8;
14422 stacktypes ['f'] = STACK_R8;
14423 #ifdef MONO_ARCH_SIMD_INTRINSICS
14424 stacktypes ['x'] = STACK_VTYPE;
14427 #if SIZEOF_REGISTER == 4
14428 /* Create MonoInsts for longs */
14429 for (i = 0; i < cfg->num_varinfo; i++) {
14430 MonoInst *ins = cfg->varinfo [i];
14432 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14433 switch (ins->type) {
14438 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14441 g_assert (ins->opcode == OP_REGOFFSET);
14443 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14445 tree->opcode = OP_REGOFFSET;
14446 tree->inst_basereg = ins->inst_basereg;
14447 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14449 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14451 tree->opcode = OP_REGOFFSET;
14452 tree->inst_basereg = ins->inst_basereg;
14453 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14463 if (cfg->compute_gc_maps) {
14464 /* registers need liveness info even for !non refs */
14465 for (i = 0; i < cfg->num_varinfo; i++) {
14466 MonoInst *ins = cfg->varinfo [i];
14468 if (ins->opcode == OP_REGVAR)
14469 ins->flags |= MONO_INST_GC_TRACK;
14473 /* FIXME: widening and truncation */
14476 * As an optimization, when a variable allocated to the stack is first loaded into
14477 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14478 * the variable again.
14480 orig_next_vreg = cfg->next_vreg;
14481 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14482 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14486 * These arrays contain the first and last instructions accessing a given
14488 * Since we emit bblocks in the same order we process them here, and we
14489 * don't split live ranges, these will precisely describe the live range of
14490 * the variable, i.e. the instruction range where a valid value can be found
14491 * in the variables location.
14492 * The live range is computed using the liveness info computed by the liveness pass.
14493 * We can't use vmv->range, since that is an abstract live range, and we need
14494 * one which is instruction precise.
14495 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14497 /* FIXME: Only do this if debugging info is requested */
14498 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14499 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14500 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14501 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14503 /* Add spill loads/stores */
14504 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14507 if (cfg->verbose_level > 2)
14508 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14510 /* Clear vreg_to_lvreg array */
14511 for (i = 0; i < lvregs_len; i++)
14512 vreg_to_lvreg [lvregs [i]] = 0;
14516 MONO_BB_FOR_EACH_INS (bb, ins) {
14517 const char *spec = INS_INFO (ins->opcode);
14518 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14519 gboolean store, no_lvreg;
14520 int sregs [MONO_MAX_SRC_REGS];
14522 if (G_UNLIKELY (cfg->verbose_level > 2))
14523 mono_print_ins (ins);
14525 if (ins->opcode == OP_NOP)
14529 * We handle LDADDR here as well, since it can only be decomposed
14530 * when variable addresses are known.
14532 if (ins->opcode == OP_LDADDR) {
14533 MonoInst *var = (MonoInst *)ins->inst_p0;
14535 if (var->opcode == OP_VTARG_ADDR) {
14536 /* Happens on SPARC/S390 where vtypes are passed by reference */
14537 MonoInst *vtaddr = var->inst_left;
14538 if (vtaddr->opcode == OP_REGVAR) {
14539 ins->opcode = OP_MOVE;
14540 ins->sreg1 = vtaddr->dreg;
14542 else if (var->inst_left->opcode == OP_REGOFFSET) {
14543 ins->opcode = OP_LOAD_MEMBASE;
14544 ins->inst_basereg = vtaddr->inst_basereg;
14545 ins->inst_offset = vtaddr->inst_offset;
14548 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14549 /* gsharedvt arg passed by ref */
14550 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14552 ins->opcode = OP_LOAD_MEMBASE;
14553 ins->inst_basereg = var->inst_basereg;
14554 ins->inst_offset = var->inst_offset;
14555 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14556 MonoInst *load, *load2, *load3;
14557 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14558 int reg1, reg2, reg3;
14559 MonoInst *info_var = cfg->gsharedvt_info_var;
14560 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14564 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14567 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14569 g_assert (info_var);
14570 g_assert (locals_var);
14572 /* Mark the instruction used to compute the locals var as used */
14573 cfg->gsharedvt_locals_var_ins = NULL;
14575 /* Load the offset */
14576 if (info_var->opcode == OP_REGOFFSET) {
14577 reg1 = alloc_ireg (cfg);
14578 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14579 } else if (info_var->opcode == OP_REGVAR) {
14581 reg1 = info_var->dreg;
14583 g_assert_not_reached ();
14585 reg2 = alloc_ireg (cfg);
14586 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14587 /* Load the locals area address */
14588 reg3 = alloc_ireg (cfg);
14589 if (locals_var->opcode == OP_REGOFFSET) {
14590 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14591 } else if (locals_var->opcode == OP_REGVAR) {
14592 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14594 g_assert_not_reached ();
14596 /* Compute the address */
14597 ins->opcode = OP_PADD;
14601 mono_bblock_insert_before_ins (bb, ins, load3);
14602 mono_bblock_insert_before_ins (bb, load3, load2);
14604 mono_bblock_insert_before_ins (bb, load2, load);
14606 g_assert (var->opcode == OP_REGOFFSET);
14608 ins->opcode = OP_ADD_IMM;
14609 ins->sreg1 = var->inst_basereg;
14610 ins->inst_imm = var->inst_offset;
14613 *need_local_opts = TRUE;
14614 spec = INS_INFO (ins->opcode);
14617 if (ins->opcode < MONO_CEE_LAST) {
14618 mono_print_ins (ins);
14619 g_assert_not_reached ();
14623 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14627 if (MONO_IS_STORE_MEMBASE (ins)) {
14628 tmp_reg = ins->dreg;
14629 ins->dreg = ins->sreg2;
14630 ins->sreg2 = tmp_reg;
14633 spec2 [MONO_INST_DEST] = ' ';
14634 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14635 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14636 spec2 [MONO_INST_SRC3] = ' ';
14638 } else if (MONO_IS_STORE_MEMINDEX (ins))
14639 g_assert_not_reached ();
14644 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14645 printf ("\t %.3s %d", spec, ins->dreg);
14646 num_sregs = mono_inst_get_src_registers (ins, sregs);
14647 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14648 printf (" %d", sregs [srcindex]);
14655 regtype = spec [MONO_INST_DEST];
14656 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14659 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14660 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14661 MonoInst *store_ins;
14663 MonoInst *def_ins = ins;
14664 int dreg = ins->dreg; /* The original vreg */
14666 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14668 if (var->opcode == OP_REGVAR) {
14669 ins->dreg = var->dreg;
14670 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14672 * Instead of emitting a load+store, use a _membase opcode.
14674 g_assert (var->opcode == OP_REGOFFSET);
14675 if (ins->opcode == OP_MOVE) {
14679 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14680 ins->inst_basereg = var->inst_basereg;
14681 ins->inst_offset = var->inst_offset;
14684 spec = INS_INFO (ins->opcode);
14688 g_assert (var->opcode == OP_REGOFFSET);
14690 prev_dreg = ins->dreg;
14692 /* Invalidate any previous lvreg for this vreg */
14693 vreg_to_lvreg [ins->dreg] = 0;
14697 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14699 store_opcode = OP_STOREI8_MEMBASE_REG;
14702 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14704 #if SIZEOF_REGISTER != 8
14705 if (regtype == 'l') {
14706 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14707 mono_bblock_insert_after_ins (bb, ins, store_ins);
14708 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14709 mono_bblock_insert_after_ins (bb, ins, store_ins);
14710 def_ins = store_ins;
14715 g_assert (store_opcode != OP_STOREV_MEMBASE);
14717 /* Try to fuse the store into the instruction itself */
14718 /* FIXME: Add more instructions */
14719 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14720 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14721 ins->inst_imm = ins->inst_c0;
14722 ins->inst_destbasereg = var->inst_basereg;
14723 ins->inst_offset = var->inst_offset;
14724 spec = INS_INFO (ins->opcode);
14725 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14726 ins->opcode = store_opcode;
14727 ins->inst_destbasereg = var->inst_basereg;
14728 ins->inst_offset = var->inst_offset;
14732 tmp_reg = ins->dreg;
14733 ins->dreg = ins->sreg2;
14734 ins->sreg2 = tmp_reg;
14737 spec2 [MONO_INST_DEST] = ' ';
14738 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14739 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14740 spec2 [MONO_INST_SRC3] = ' ';
14742 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14743 // FIXME: The backends expect the base reg to be in inst_basereg
14744 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14746 ins->inst_basereg = var->inst_basereg;
14747 ins->inst_offset = var->inst_offset;
14748 spec = INS_INFO (ins->opcode);
14750 /* printf ("INS: "); mono_print_ins (ins); */
14751 /* Create a store instruction */
14752 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14754 /* Insert it after the instruction */
14755 mono_bblock_insert_after_ins (bb, ins, store_ins);
14757 def_ins = store_ins;
14760 * We can't assign ins->dreg to var->dreg here, since the
14761 * sregs could use it. So set a flag, and do it after
14764 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14765 dest_has_lvreg = TRUE;
14770 if (def_ins && !live_range_start [dreg]) {
14771 live_range_start [dreg] = def_ins;
14772 live_range_start_bb [dreg] = bb;
14775 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14778 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14779 tmp->inst_c1 = dreg;
14780 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14787 num_sregs = mono_inst_get_src_registers (ins, sregs);
14788 for (srcindex = 0; srcindex < 3; ++srcindex) {
14789 regtype = spec [MONO_INST_SRC1 + srcindex];
14790 sreg = sregs [srcindex];
14792 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14793 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14794 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14795 MonoInst *use_ins = ins;
14796 MonoInst *load_ins;
14797 guint32 load_opcode;
14799 if (var->opcode == OP_REGVAR) {
14800 sregs [srcindex] = var->dreg;
14801 //mono_inst_set_src_registers (ins, sregs);
14802 live_range_end [sreg] = use_ins;
14803 live_range_end_bb [sreg] = bb;
14805 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14808 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14809 /* var->dreg is a hreg */
14810 tmp->inst_c1 = sreg;
14811 mono_bblock_insert_after_ins (bb, ins, tmp);
14817 g_assert (var->opcode == OP_REGOFFSET);
14819 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14821 g_assert (load_opcode != OP_LOADV_MEMBASE);
14823 if (vreg_to_lvreg [sreg]) {
14824 g_assert (vreg_to_lvreg [sreg] != -1);
14826 /* The variable is already loaded to an lvreg */
14827 if (G_UNLIKELY (cfg->verbose_level > 2))
14828 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14829 sregs [srcindex] = vreg_to_lvreg [sreg];
14830 //mono_inst_set_src_registers (ins, sregs);
14834 /* Try to fuse the load into the instruction */
14835 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14836 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14837 sregs [0] = var->inst_basereg;
14838 //mono_inst_set_src_registers (ins, sregs);
14839 ins->inst_offset = var->inst_offset;
14840 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14841 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14842 sregs [1] = var->inst_basereg;
14843 //mono_inst_set_src_registers (ins, sregs);
14844 ins->inst_offset = var->inst_offset;
14846 if (MONO_IS_REAL_MOVE (ins)) {
14847 ins->opcode = OP_NOP;
14850 //printf ("%d ", srcindex); mono_print_ins (ins);
14852 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14854 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14855 if (var->dreg == prev_dreg) {
14857 * sreg refers to the value loaded by the load
14858 * emitted below, but we need to use ins->dreg
14859 * since it refers to the store emitted earlier.
14863 g_assert (sreg != -1);
14864 vreg_to_lvreg [var->dreg] = sreg;
14865 g_assert (lvregs_len < 1024);
14866 lvregs [lvregs_len ++] = var->dreg;
14870 sregs [srcindex] = sreg;
14871 //mono_inst_set_src_registers (ins, sregs);
14873 #if SIZEOF_REGISTER != 8
14874 if (regtype == 'l') {
14875 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14876 mono_bblock_insert_before_ins (bb, ins, load_ins);
14877 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14878 mono_bblock_insert_before_ins (bb, ins, load_ins);
14879 use_ins = load_ins;
14884 #if SIZEOF_REGISTER == 4
14885 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14887 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14888 mono_bblock_insert_before_ins (bb, ins, load_ins);
14889 use_ins = load_ins;
14893 if (var->dreg < orig_next_vreg) {
14894 live_range_end [var->dreg] = use_ins;
14895 live_range_end_bb [var->dreg] = bb;
14898 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14901 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14902 tmp->inst_c1 = var->dreg;
14903 mono_bblock_insert_after_ins (bb, ins, tmp);
14907 mono_inst_set_src_registers (ins, sregs);
14909 if (dest_has_lvreg) {
14910 g_assert (ins->dreg != -1);
14911 vreg_to_lvreg [prev_dreg] = ins->dreg;
14912 g_assert (lvregs_len < 1024);
14913 lvregs [lvregs_len ++] = prev_dreg;
14914 dest_has_lvreg = FALSE;
14918 tmp_reg = ins->dreg;
14919 ins->dreg = ins->sreg2;
14920 ins->sreg2 = tmp_reg;
14923 if (MONO_IS_CALL (ins)) {
14924 /* Clear vreg_to_lvreg array */
14925 for (i = 0; i < lvregs_len; i++)
14926 vreg_to_lvreg [lvregs [i]] = 0;
14928 } else if (ins->opcode == OP_NOP) {
14930 MONO_INST_NULLIFY_SREGS (ins);
14933 if (cfg->verbose_level > 2)
14934 mono_print_ins_index (1, ins);
14937 /* Extend the live range based on the liveness info */
14938 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14939 for (i = 0; i < cfg->num_varinfo; i ++) {
14940 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14942 if (vreg_is_volatile (cfg, vi->vreg))
14943 /* The liveness info is incomplete */
14946 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14947 /* Live from at least the first ins of this bb */
14948 live_range_start [vi->vreg] = bb->code;
14949 live_range_start_bb [vi->vreg] = bb;
14952 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14953 /* Live at least until the last ins of this bb */
14954 live_range_end [vi->vreg] = bb->last_ins;
14955 live_range_end_bb [vi->vreg] = bb;
14962 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14963 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14965 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14966 for (i = 0; i < cfg->num_varinfo; ++i) {
14967 int vreg = MONO_VARINFO (cfg, i)->vreg;
14970 if (live_range_start [vreg]) {
14971 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14973 ins->inst_c1 = vreg;
14974 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14976 if (live_range_end [vreg]) {
14977 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14979 ins->inst_c1 = vreg;
14980 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14981 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14983 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14988 if (cfg->gsharedvt_locals_var_ins) {
14989 /* Nullify if unused */
14990 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14991 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14994 g_free (live_range_start);
14995 g_free (live_range_end);
14996 g_free (live_range_start_bb);
14997 g_free (live_range_end_bb);
15002 * - use 'iadd' instead of 'int_add'
15003 * - handling ovf opcodes: decompose in method_to_ir.
15004 * - unify iregs/fregs
15005 * -> partly done, the missing parts are:
15006 * - a more complete unification would involve unifying the hregs as well, so
15007 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15008 * would no longer map to the machine hregs, so the code generators would need to
15009 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15010 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15011 * fp/non-fp branches speeds it up by about 15%.
15012 * - use sext/zext opcodes instead of shifts
15014 * - get rid of TEMPLOADs if possible and use vregs instead
15015 * - clean up usage of OP_P/OP_ opcodes
15016 * - cleanup usage of DUMMY_USE
15017 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15019 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15020 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15021 * - make sure handle_stack_args () is called before the branch is emitted
15022 * - when the new IR is done, get rid of all unused stuff
15023 * - COMPARE/BEQ as separate instructions or unify them ?
15024 * - keeping them separate allows specialized compare instructions like
15025 * compare_imm, compare_membase
15026 * - most back ends unify fp compare+branch, fp compare+ceq
15027 * - integrate mono_save_args into inline_method
15028 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15029 * - handle long shift opts on 32 bit platforms somehow: they require
15030 * 3 sregs (2 for arg1 and 1 for arg2)
15031 * - make byref a 'normal' type.
15032 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15033 * variable if needed.
15034 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15035 * like inline_method.
15036 * - remove inlining restrictions
15037 * - fix LNEG and enable cfold of INEG
15038 * - generalize x86 optimizations like ldelema as a peephole optimization
15039 * - add store_mem_imm for amd64
15040 * - optimize the loading of the interruption flag in the managed->native wrappers
15041 * - avoid special handling of OP_NOP in passes
15042 * - move code inserting instructions into one function/macro.
15043 * - try a coalescing phase after liveness analysis
15044 * - add float -> vreg conversion + local optimizations on !x86
15045 * - figure out how to handle decomposed branches during optimizations, ie.
15046 * compare+branch, op_jump_table+op_br etc.
15047 * - promote RuntimeXHandles to vregs
15048 * - vtype cleanups:
15049 * - add a NEW_VARLOADA_VREG macro
15050 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15051 * accessing vtype fields.
15052 * - get rid of I8CONST on 64 bit platforms
15053 * - dealing with the increase in code size due to branches created during opcode
15055 * - use extended basic blocks
15056 * - all parts of the JIT
15057 * - handle_global_vregs () && local regalloc
15058 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15059 * - sources of increase in code size:
15062 * - isinst and castclass
15063 * - lvregs not allocated to global registers even if used multiple times
15064 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15066 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15067 * - add all micro optimizations from the old JIT
15068 * - put tree optimizations into the deadce pass
15069 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15070 * specific function.
15071 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15072 * fcompare + branchCC.
15073 * - create a helper function for allocating a stack slot, taking into account
15074 * MONO_CFG_HAS_SPILLUP.
15076 * - merge the ia64 switch changes.
15077 * - optimize mono_regstate2_alloc_int/float.
15078 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15079 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15080 * parts of the tree could be separated by other instructions, killing the tree
15081 * arguments, or stores killing loads etc. Also, should we fold loads into other
15082 * instructions if the result of the load is used multiple times ?
15083 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15084 * - LAST MERGE: 108395.
15085 * - when returning vtypes in registers, generate IR and append it to the end of the
15086 * last bb instead of doing it in the epilog.
15087 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15095 - When to decompose opcodes:
15096 - earlier: this makes some optimizations hard to implement, since the low level IR
15097 no longer contains the necessary information. But it is easier to do.
15098 - later: harder to implement, enables more optimizations.
15099 - Branches inside bblocks:
15100 - created when decomposing complex opcodes.
15101 - branches to another bblock: harmless, but not tracked by the branch
15102 optimizations, so need to branch to a label at the start of the bblock.
15103 - branches to inside the same bblock: very problematic, trips up the local
15104 reg allocator. Can be fixed by splitting the current bblock, but that is a
15105 complex operation, since some local vregs can become global vregs etc.
15106 - Local/global vregs:
15107 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15108 local register allocator.
15109 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15110 structure, created by mono_create_var (). Assigned to hregs or the stack by
15111 the global register allocator.
15112 - When to do optimizations like alu->alu_imm:
15113 - earlier -> saves work later on since the IR will be smaller/simpler
15114 - later -> can work on more instructions
15115 - Handling of valuetypes:
15116 - When a vtype is pushed on the stack, a new temporary is created, an
15117 instruction computing its address (LDADDR) is emitted and pushed on
15118 the stack. Need to optimize cases when the vtype is used immediately as in
15119 argument passing, stloc etc.
15120 - Instead of the to_end stuff in the old JIT, simply call the function handling
15121 the values on the stack before emitting the last instruction of the bb.
15124 #endif /* DISABLE_JIT */