2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
/* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
158 * Instruction metadata
166 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
167 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
173 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
178 /* keep in sync with the enum in mini.h */
181 #include "mini-ops.h"
186 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
187 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
189 * This should contain the index of the last sreg + 1. This is not the same
190 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
192 const gint8 ins_sreg_counts[] = {
193 #include "mini-ops.h"
198 #define MONO_INIT_VARINFO(vi,id) do { \
199 (vi)->range.first_use.pos.bid = 0xffff; \
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_replace_type (type);
280 switch (type->type) {
283 case MONO_TYPE_BOOLEAN:
295 case MONO_TYPE_FNPTR:
297 case MONO_TYPE_CLASS:
298 case MONO_TYPE_STRING:
299 case MONO_TYPE_OBJECT:
300 case MONO_TYPE_SZARRAY:
301 case MONO_TYPE_ARRAY:
305 #if SIZEOF_REGISTER == 8
314 case MONO_TYPE_VALUETYPE:
315 if (type->data.klass->enumtype) {
316 type = mono_class_enum_basetype (type->data.klass);
319 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
322 case MONO_TYPE_TYPEDBYREF:
324 case MONO_TYPE_GENERICINST:
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->generic_sharing_context);
330 if (mini_type_var_is_vt (cfg, type))
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
346 printf ("\n%s %d: [IN: ", msg, bb->block_num);
347 for (i = 0; i < bb->in_count; ++i)
348 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 for (i = 0; i < bb->out_count; ++i)
351 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 for (tree = bb->code; tree; tree = tree->next)
354 mono_print_ins_index (-1, tree);
358 mono_create_helper_signatures (void)
360 helper_sig_domain_get = mono_create_icall_signature ("ptr");
361 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
362 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
364 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
365 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
366 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
376 static MONO_NEVER_INLINE void
377 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
382 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
383 g_free (method_fname);
384 g_free (cil_method_fname);
387 static MONO_NEVER_INLINE void
388 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
390 char *method_fname = mono_method_full_name (method, TRUE);
391 char *field_fname = mono_field_full_name (field);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
393 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
394 g_free (method_fname);
395 g_free (field_fname);
398 static MONO_NEVER_INLINE void
399 inline_failure (MonoCompile *cfg, const char *msg)
401 if (cfg->verbose_level >= 2)
402 printf ("inline failed: %s\n", msg);
403 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 static MONO_NEVER_INLINE void
415 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
417 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
418 if (cfg->verbose_level >= 2)
419 printf ("%s\n", cfg->exception_message);
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
 * When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
425 * foo<T> (int i) { ldarg.0; box T; }
427 #define UNVERIFIED do { \
428 if (cfg->gsharedvt) { \
429 if (cfg->verbose_level > 2) \
430 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
431 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
432 goto exception_exit; \
434 break_on_unverified (); \
438 #define GET_BBLOCK(cfg,tblock,ip) do { \
439 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
441 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
442 NEW_BBLOCK (cfg, (tblock)); \
443 (tblock)->cil_code = (ip); \
444 ADD_BBLOCK (cfg, (tblock)); \
448 #if defined(TARGET_X86) || defined(TARGET_AMD64)
449 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
450 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
451 (dest)->dreg = alloc_ireg_mp ((cfg)); \
452 (dest)->sreg1 = (sr1); \
453 (dest)->sreg2 = (sr2); \
454 (dest)->inst_imm = (imm); \
455 (dest)->backend.shift_amount = (shift); \
456 MONO_ADD_INS ((cfg)->cbb, (dest)); \
460 #if SIZEOF_REGISTER == 8
461 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
462 /* FIXME: Need to add many more cases */ \
463 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
465 int dr = alloc_preg (cfg); \
466 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
467 (ins)->sreg2 = widen->dreg; \
471 #define ADD_WIDEN_OP(ins, arg1, arg2)
474 #define ADD_BINOP(op) do { \
475 MONO_INST_NEW (cfg, ins, (op)); \
477 ins->sreg1 = sp [0]->dreg; \
478 ins->sreg2 = sp [1]->dreg; \
479 type_from_op (ins, sp [0], sp [1]); \
481 /* Have to insert a widening op */ \
482 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
483 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
484 MONO_ADD_INS ((cfg)->cbb, (ins)); \
485 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
488 #define ADD_UNOP(op) do { \
489 MONO_INST_NEW (cfg, ins, (op)); \
491 ins->sreg1 = sp [0]->dreg; \
492 type_from_op (ins, sp [0], NULL); \
494 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
495 MONO_ADD_INS ((cfg)->cbb, (ins)); \
496 *sp++ = mono_decompose_opcode (cfg, ins); \
499 #define ADD_BINCOND(next_block) do { \
502 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
503 cmp->sreg1 = sp [0]->dreg; \
504 cmp->sreg2 = sp [1]->dreg; \
505 type_from_op (cmp, sp [0], sp [1]); \
507 type_from_op (ins, sp [0], sp [1]); \
508 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
509 GET_BBLOCK (cfg, tblock, target); \
510 link_bblock (cfg, bblock, tblock); \
511 ins->inst_true_bb = tblock; \
512 if ((next_block)) { \
513 link_bblock (cfg, bblock, (next_block)); \
514 ins->inst_false_bb = (next_block); \
515 start_new_bblock = 1; \
517 GET_BBLOCK (cfg, tblock, ip); \
518 link_bblock (cfg, bblock, tblock); \
519 ins->inst_false_bb = tblock; \
520 start_new_bblock = 2; \
522 if (sp != stack_start) { \
523 handle_stack_args (cfg, stack_start, sp - stack_start); \
524 CHECK_UNVERIFIABLE (cfg); \
526 MONO_ADD_INS (bblock, cmp); \
527 MONO_ADD_INS (bblock, ins); \
531 * link_bblock: Links two basic blocks
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * that control flow reaches after 'from'.
538 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
540 MonoBasicBlock **newa;
544 if (from->cil_code) {
546 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
548 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
551 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
553 printf ("edge from entry to exit\n");
558 for (i = 0; i < from->out_count; ++i) {
559 if (to == from->out_bb [i]) {
565 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
566 for (i = 0; i < from->out_count; ++i) {
567 newa [i] = from->out_bb [i];
575 for (i = 0; i < to->in_count; ++i) {
576 if (from == to->in_bb [i]) {
582 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
583 for (i = 0; i < to->in_count; ++i) {
584 newa [i] = to->in_bb [i];
593 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
595 link_bblock (cfg, from, to);
599 * mono_find_block_region:
601 * We mark each basic block with a region ID. We use that to avoid BB
602 * optimizations when blocks are in different regions.
605 * A region token that encodes where this region is, and information
606 * about the clause owner for this block.
608 * The region encodes the try/catch/filter clause that owns this block
609 * as well as the type. -1 is a special value that represents a block
610 * that is in none of try/catch/filter.
613 mono_find_block_region (MonoCompile *cfg, int offset)
615 MonoMethodHeader *header = cfg->header;
616 MonoExceptionClause *clause;
619 for (i = 0; i < header->num_clauses; ++i) {
620 clause = &header->clauses [i];
621 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
622 (offset < (clause->handler_offset)))
623 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
625 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
626 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
627 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
628 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
629 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
631 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
634 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
635 return ((i + 1) << 8) | clause->flags;
642 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
644 MonoMethodHeader *header = cfg->header;
645 MonoExceptionClause *clause;
649 for (i = 0; i < header->num_clauses; ++i) {
650 clause = &header->clauses [i];
651 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
652 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
653 if (clause->flags == type)
654 res = g_list_append (res, clause);
661 mono_create_spvar_for_region (MonoCompile *cfg, int region)
665 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
669 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
670 /* prevent it from being register allocated */
671 var->flags |= MONO_INST_VOLATILE;
673 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
677 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
679 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
683 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
687 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
691 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
692 /* prevent it from being register allocated */
693 var->flags |= MONO_INST_VOLATILE;
695 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
701 * Returns the type used in the eval stack when @type is loaded.
702 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
705 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
709 type = mini_replace_type (type);
710 inst->klass = klass = mono_class_from_mono_type (type);
712 inst->type = STACK_MP;
717 switch (type->type) {
719 inst->type = STACK_INV;
723 case MONO_TYPE_BOOLEAN:
729 inst->type = STACK_I4;
734 case MONO_TYPE_FNPTR:
735 inst->type = STACK_PTR;
737 case MONO_TYPE_CLASS:
738 case MONO_TYPE_STRING:
739 case MONO_TYPE_OBJECT:
740 case MONO_TYPE_SZARRAY:
741 case MONO_TYPE_ARRAY:
742 inst->type = STACK_OBJ;
746 inst->type = STACK_I8;
750 inst->type = STACK_R8;
752 case MONO_TYPE_VALUETYPE:
753 if (type->data.klass->enumtype) {
754 type = mono_class_enum_basetype (type->data.klass);
758 inst->type = STACK_VTYPE;
761 case MONO_TYPE_TYPEDBYREF:
762 inst->klass = mono_defaults.typed_reference_class;
763 inst->type = STACK_VTYPE;
765 case MONO_TYPE_GENERICINST:
766 type = &type->data.generic_class->container_class->byval_arg;
770 g_assert (cfg->generic_sharing_context);
771 if (mini_is_gsharedvt_type (cfg, type)) {
772 g_assert (cfg->gsharedvt);
773 inst->type = STACK_VTYPE;
775 inst->type = STACK_OBJ;
779 g_error ("unknown type 0x%02x in eval stack type", type->type);
784 * The following tables are used to quickly validate the IL code in type_from_op ().
787 bin_num_table [STACK_MAX] [STACK_MAX] = {
788 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
795 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
800 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
803 /* reduce the size of this table */
805 bin_int_table [STACK_MAX] [STACK_MAX] = {
806 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
807 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
808 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
817 bin_comp_table [STACK_MAX] [STACK_MAX] = {
818 /* Inv i L p F & O vt */
820 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
821 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
822 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
823 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
824 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
825 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
826 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
829 /* reduce the size of this table */
831 shift_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
843 * Tables to map from the non-specific opcode to the matching
844 * type-specific opcode.
846 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
848 binops_op_map [STACK_MAX] = {
849 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
852 /* handles from CEE_NEG to CEE_CONV_U8 */
854 unops_op_map [STACK_MAX] = {
855 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
858 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
860 ovfops_op_map [STACK_MAX] = {
861 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
864 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
866 ovf2ops_op_map [STACK_MAX] = {
867 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
870 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
872 ovf3ops_op_map [STACK_MAX] = {
873 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
876 /* handles from CEE_BEQ to CEE_BLT_UN */
878 beqops_op_map [STACK_MAX] = {
879 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
882 /* handles from CEE_CEQ to CEE_CLT_UN */
884 ceqops_op_map [STACK_MAX] = {
885 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
889 * Sets ins->type (the type on the eval stack) according to the
890 * type of the opcode and the arguments to it.
891 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
893 * FIXME: this function sets ins->type unconditionally in some cases, but
894 * it should set it to invalid for some types (a conv.x on an object)
897 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
899 switch (ins->opcode) {
906 /* FIXME: check unverifiable args for STACK_MP */
907 ins->type = bin_num_table [src1->type] [src2->type];
908 ins->opcode += binops_op_map [ins->type];
915 ins->type = bin_int_table [src1->type] [src2->type];
916 ins->opcode += binops_op_map [ins->type];
921 ins->type = shift_table [src1->type] [src2->type];
922 ins->opcode += binops_op_map [ins->type];
927 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
928 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
929 ins->opcode = OP_LCOMPARE;
930 else if (src1->type == STACK_R8)
931 ins->opcode = OP_FCOMPARE;
933 ins->opcode = OP_ICOMPARE;
935 case OP_ICOMPARE_IMM:
936 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
937 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
938 ins->opcode = OP_LCOMPARE_IMM;
950 ins->opcode += beqops_op_map [src1->type];
953 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
954 ins->opcode += ceqops_op_map [src1->type];
960 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
961 ins->opcode += ceqops_op_map [src1->type];
965 ins->type = neg_table [src1->type];
966 ins->opcode += unops_op_map [ins->type];
969 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
970 ins->type = src1->type;
972 ins->type = STACK_INV;
973 ins->opcode += unops_op_map [ins->type];
979 ins->type = STACK_I4;
980 ins->opcode += unops_op_map [src1->type];
983 ins->type = STACK_R8;
984 switch (src1->type) {
987 ins->opcode = OP_ICONV_TO_R_UN;
990 ins->opcode = OP_LCONV_TO_R_UN;
994 case CEE_CONV_OVF_I1:
995 case CEE_CONV_OVF_U1:
996 case CEE_CONV_OVF_I2:
997 case CEE_CONV_OVF_U2:
998 case CEE_CONV_OVF_I4:
999 case CEE_CONV_OVF_U4:
1000 ins->type = STACK_I4;
1001 ins->opcode += ovf3ops_op_map [src1->type];
1003 case CEE_CONV_OVF_I_UN:
1004 case CEE_CONV_OVF_U_UN:
1005 ins->type = STACK_PTR;
1006 ins->opcode += ovf2ops_op_map [src1->type];
1008 case CEE_CONV_OVF_I1_UN:
1009 case CEE_CONV_OVF_I2_UN:
1010 case CEE_CONV_OVF_I4_UN:
1011 case CEE_CONV_OVF_U1_UN:
1012 case CEE_CONV_OVF_U2_UN:
1013 case CEE_CONV_OVF_U4_UN:
1014 ins->type = STACK_I4;
1015 ins->opcode += ovf2ops_op_map [src1->type];
1018 ins->type = STACK_PTR;
1019 switch (src1->type) {
1021 ins->opcode = OP_ICONV_TO_U;
1025 #if SIZEOF_VOID_P == 8
1026 ins->opcode = OP_LCONV_TO_U;
1028 ins->opcode = OP_MOVE;
1032 ins->opcode = OP_LCONV_TO_U;
1035 ins->opcode = OP_FCONV_TO_U;
1041 ins->type = STACK_I8;
1042 ins->opcode += unops_op_map [src1->type];
1044 case CEE_CONV_OVF_I8:
1045 case CEE_CONV_OVF_U8:
1046 ins->type = STACK_I8;
1047 ins->opcode += ovf3ops_op_map [src1->type];
1049 case CEE_CONV_OVF_U8_UN:
1050 case CEE_CONV_OVF_I8_UN:
1051 ins->type = STACK_I8;
1052 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_R8;
1057 ins->opcode += unops_op_map [src1->type];
1060 ins->type = STACK_R8;
1064 ins->type = STACK_I4;
1065 ins->opcode += ovfops_op_map [src1->type];
1068 case CEE_CONV_OVF_I:
1069 case CEE_CONV_OVF_U:
1070 ins->type = STACK_PTR;
1071 ins->opcode += ovfops_op_map [src1->type];
1074 case CEE_ADD_OVF_UN:
1076 case CEE_MUL_OVF_UN:
1078 case CEE_SUB_OVF_UN:
1079 ins->type = bin_num_table [src1->type] [src2->type];
1080 ins->opcode += ovfops_op_map [src1->type];
1081 if (ins->type == STACK_R8)
1082 ins->type = STACK_INV;
1084 case OP_LOAD_MEMBASE:
1085 ins->type = STACK_PTR;
1087 case OP_LOADI1_MEMBASE:
1088 case OP_LOADU1_MEMBASE:
1089 case OP_LOADI2_MEMBASE:
1090 case OP_LOADU2_MEMBASE:
1091 case OP_LOADI4_MEMBASE:
1092 case OP_LOADU4_MEMBASE:
1093 ins->type = STACK_PTR;
1095 case OP_LOADI8_MEMBASE:
1096 ins->type = STACK_I8;
1098 case OP_LOADR4_MEMBASE:
1099 case OP_LOADR8_MEMBASE:
1100 ins->type = STACK_R8;
1103 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1107 if (ins->type == STACK_MP)
1108 ins->klass = mono_defaults.object_class;
1113 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1119 param_table [STACK_MAX] [STACK_MAX] = {
1124 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1128 switch (args->type) {
1138 for (i = 0; i < sig->param_count; ++i) {
1139 switch (args [i].type) {
1143 if (!sig->params [i]->byref)
1147 if (sig->params [i]->byref)
1149 switch (sig->params [i]->type) {
1150 case MONO_TYPE_CLASS:
1151 case MONO_TYPE_STRING:
1152 case MONO_TYPE_OBJECT:
1153 case MONO_TYPE_SZARRAY:
1154 case MONO_TYPE_ARRAY:
1161 if (sig->params [i]->byref)
1163 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1172 /*if (!param_table [args [i].type] [sig->params [i]->type])
1180 * When we need a pointer to the current domain many times in a method, we
1181 * call mono_domain_get() once and we store the result in a local variable.
1182 * This function returns the variable that represents the MonoDomain*.
1184 inline static MonoInst *
1185 mono_get_domainvar (MonoCompile *cfg)
1187 if (!cfg->domainvar)
1188 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1189 return cfg->domainvar;
1193 * The got_var contains the address of the Global Offset Table when AOT
1197 mono_get_got_var (MonoCompile *cfg)
1199 #ifdef MONO_ARCH_NEED_GOT_VAR
1200 if (!cfg->compile_aot)
1202 if (!cfg->got_var) {
1203 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1205 return cfg->got_var;
1212 mono_get_vtable_var (MonoCompile *cfg)
1214 g_assert (cfg->generic_sharing_context);
1216 if (!cfg->rgctx_var) {
1217 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1218 /* force the var to be stack allocated */
1219 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1222 return cfg->rgctx_var;
1226 type_from_stack_type (MonoInst *ins) {
1227 switch (ins->type) {
1228 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1229 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1230 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1231 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1233 return &ins->klass->this_arg;
1234 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1235 case STACK_VTYPE: return &ins->klass->byval_arg;
1237 g_error ("stack type %d to monotype not handled\n", ins->type);
1242 static G_GNUC_UNUSED int
1243 type_to_stack_type (MonoType *t)
1245 t = mono_type_get_underlying_type (t);
1249 case MONO_TYPE_BOOLEAN:
1252 case MONO_TYPE_CHAR:
1259 case MONO_TYPE_FNPTR:
1261 case MONO_TYPE_CLASS:
1262 case MONO_TYPE_STRING:
1263 case MONO_TYPE_OBJECT:
1264 case MONO_TYPE_SZARRAY:
1265 case MONO_TYPE_ARRAY:
1273 case MONO_TYPE_VALUETYPE:
1274 case MONO_TYPE_TYPEDBYREF:
1276 case MONO_TYPE_GENERICINST:
1277 if (mono_type_generic_inst_is_valuetype (t))
1283 g_assert_not_reached ();
1290 array_access_to_klass (int opcode)
1294 return mono_defaults.byte_class;
1296 return mono_defaults.uint16_class;
1299 return mono_defaults.int_class;
1302 return mono_defaults.sbyte_class;
1305 return mono_defaults.int16_class;
1308 return mono_defaults.int32_class;
1310 return mono_defaults.uint32_class;
1313 return mono_defaults.int64_class;
1316 return mono_defaults.single_class;
1319 return mono_defaults.double_class;
1320 case CEE_LDELEM_REF:
1321 case CEE_STELEM_REF:
1322 return mono_defaults.object_class;
1324 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return (possibly creating) the local variable used to carry stack slot
 * @slot of stack type ins->type across basic-block boundaries.
 */
1330 * We try to share variables when possible
1333 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1338 /* inlining can result in deeper stacks */
1339 if (slot >= cfg->header->max_stack)
1340 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* one cache entry per (slot, stack type) pair */
1342 pos = ins->type - 1 + slot * STACK_MAX;
1344 switch (ins->type) {
/* reuse a previously created var for this slot/type if one exists */
1351 if ((vnum = cfg->intvars [pos]))
1352 return cfg->varinfo [vnum];
1353 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1354 cfg->intvars [pos] = res->inst_c0;
/* fallthrough path (elided case labels): non-cacheable types always get a fresh var */
1357 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * For AOT compilation, remember which image+token @key (a method/field/class
 * pointer) was loaded from, so the AOT compiler can emit a token reference.
 */
1363 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1366 * Don't use this if a generic_context is set, since that means AOT can't
1367 * look up the method using just the image+token.
1368 * table == 0 means this is a reference made from a wrapper.
1370 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1371 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1372 jump_info_token->image = image;
1373 jump_info_token->token = token;
1374 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1379 * This function is called to handle items that are left on the evaluation stack
1380 * at basic block boundaries. What happens is that we save the values to local variables
1381 * and we reload them later when first entering the target basic block (with the
1382 * handle_loaded_temps () function).
1383 * A single join point will use the same variables (stored in the array bb->out_stack or
1384 * bb->in_stack, if the basic block is before or after the join point).
1386 * This function needs to be called _before_ emitting the last instruction of
1387 * the bb (i.e. before emitting a branch).
1388 * If the stack merge fails at a join point, cfg->unverifiable is set.
1391 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1394 MonoBasicBlock *bb = cfg->cbb;
1395 MonoBasicBlock *outb;
1396 MonoInst *inst, **locals;
1401 if (cfg->verbose_level > 3)
1402 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables hold the out-stack. */
1403 if (!bb->out_scount) {
1404 bb->out_scount = count;
1405 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack, so both sides agree. */
1407 for (i = 0; i < bb->out_count; ++i) {
1408 outb = bb->out_bb [i];
1409 /* exception handlers are linked, but they should not be considered for stack args */
1410 if (outb->flags & BB_EXCEPTION_HANDLER)
1412 //printf (" %d", outb->block_num);
1413 if (outb->in_stack) {
1415 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh slots. */
1421 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1422 for (i = 0; i < count; ++i) {
1424 * try to reuse temps already allocated for this purpose, if they occupy the same
1425 * stack slot and if they are of the same type.
1426 * This won't cause conflicts since if 'local' is used to
1427 * store one of the values in the in_stack of a bblock, then
1428 * the same variable will be used for the same outgoing stack
1430 * This doesn't work when inlining methods, since the bblocks
1431 * in the inlined methods do not inherit their in_stack from
1432 * the bblock they are inlined to. See bug #58863 for an
1435 if (cfg->inlined_method)
1436 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1438 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out-stack to every successor that doesn't have one yet;
 * a depth mismatch at a join point makes the method unverifiable. */
1443 for (i = 0; i < bb->out_count; ++i) {
1444 outb = bb->out_bb [i];
1445 /* exception handlers are linked, but they should not be considered for stack args */
1446 if (outb->flags & BB_EXCEPTION_HANDLER)
1448 if (outb->in_scount) {
1449 if (outb->in_scount != bb->out_scount) {
1450 cfg->unverifiable = TRUE;
1453 continue; /* check they are the same locals */
1455 outb->in_scount = count;
1456 outb->in_stack = bb->out_stack;
1459 locals = bb->out_stack;
/* Spill the current stack values into the chosen variables and make the
 * variables the new stack contents. */
1461 for (i = 0; i < count; ++i) {
1462 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1463 inst->cil_code = sp [i]->cil_code;
1464 sp [i] = locals [i];
1465 if (cfg->verbose_level > 3)
1466 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1470 * It is possible that the out bblocks already have in_stack assigned, and
1471 * the in_stacks differ. In this case, we will store to all the different
1478 /* Find a bblock which has a different in_stack */
1480 while (bindex < bb->out_count) {
1481 outb = bb->out_bb [bindex];
1482 /* exception handlers are linked, but they should not be considered for stack args */
1483 if (outb->flags & BB_EXCEPTION_HANDLER) {
1487 if (outb->in_stack != locals) {
/* store the same values into this successor's distinct variable set too */
1488 for (i = 0; i < count; ++i) {
1489 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1490 inst->cil_code = sp [i]->cil_code;
1491 sp [i] = locals [i];
1492 if (cfg->verbose_level > 3)
1493 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1495 locals = outb->in_stack;
1504 /* Emit code which loads interface_offsets [klass->interface_id]
1505 * The array is stored in memory before vtable.
/* Under AOT the interface id is not known at compile time, so it is loaded
 * as a patched constant (already adjusted to the negative-offset form);
 * otherwise the offset is folded into the load's immediate. */
1508 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1510 if (cfg->compile_aot) {
1511 int ioffset_reg = alloc_preg (cfg);
1512 int iid_reg = alloc_preg (cfg);
1514 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1515 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: entries live at negative offsets from the vtable pointer */
1519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets @intf_bit_reg to nonzero iff the interface bitmap found
 * at [@base_reg + @offset] has the bit for @klass's interface id set.
 * Three strategies: compressed bitmap (icall), AOT (iid loaded as a patched
 * constant, bit computed at run time), or JIT (iid known, byte/bit folded
 * into immediates).
 */
1524 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1526 int ibitmap_reg = alloc_preg (cfg);
1527 #ifdef COMPRESSED_INTERFACE_BITMAP
1529 MonoInst *res, *ins;
1530 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1531 MONO_ADD_INS (cfg->cbb, ins);
1533 if (cfg->compile_aot)
1534 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1536 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* compressed bitmaps are decoded by a runtime helper */
1537 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1538 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1540 int ibitmap_byte_reg = alloc_preg (cfg);
1542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1544 if (cfg->compile_aot) {
1545 int iid_reg = alloc_preg (cfg);
1546 int shifted_iid_reg = alloc_preg (cfg);
1547 int ibitmap_byte_address_reg = alloc_preg (cfg);
1548 int masked_iid_reg = alloc_preg (cfg);
1549 int iid_one_bit_reg = alloc_preg (cfg);
1550 int iid_bit_reg = alloc_preg (cfg);
1551 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1553 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1554 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1556 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1558 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask are compile-time constants */
1560 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1567 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1568 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoClass.interface_bitmap. */
1571 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1573 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1577 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1578 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoVTable.interface_bitmap. */
1581 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1583 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1587 * Emit code which checks whether the interface id of @klass is smaller
1588 * than the value given by max_iid_reg.
/* On failure: branch to @false_target if given, otherwise throw
 * InvalidCastException. */
1591 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1592 MonoBasicBlock *false_target)
1594 if (cfg->compile_aot) {
/* AOT: interface id only known at load time, compare against a patched constant */
1595 int iid_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1604 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1607 /* Same as above, but obtains max_iid from a vtable */
1609 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1610 MonoBasicBlock *false_target)
1612 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is an unsigned 16-bit field, hence the LOADU2 */
1614 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1615 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1618 /* Same as above, but obtains max_iid from a klass */
1620 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1621 MonoBasicBlock *false_target)
1623 int max_iid_reg = alloc_preg (cfg);
/* same 16-bit field, read from MonoClass instead of MonoVTable */
1625 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1626 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance of non-interface class" test for the class stored in
 * @klass_reg against @klass, using the supertypes array: a class derives from
 * @klass iff supertypes [klass->idepth - 1] == klass. Branches to
 * @true_target on match, @false_target on a too-shallow idepth.
 * @klass_ins, when non-NULL, supplies the target class as a runtime value
 * (generic sharing); otherwise it is an AOT patch or an inlined pointer.
 */
1630 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1632 int idepth_reg = alloc_preg (cfg);
1633 int stypes_reg = alloc_preg (cfg);
1634 int stype = alloc_preg (cfg);
1636 mono_class_setup_supertypes (klass);
/* only check idepth when the supertypes array may be shorter than the
 * statically-sized default table */
1638 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1639 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1643 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1644 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1646 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1647 } else if (cfg->compile_aot) {
1648 int const_reg = alloc_preg (cfg);
1649 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1650 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: compare directly against the class pointer as an immediate */
1652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without a runtime class instruction. */
1658 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1660 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check on the vtable in @vtable_reg: first the
 * max-iid range check, then the interface bitmap bit test. With a
 * @true_target this is an isinst-style branch; without one a failed test
 * throws InvalidCastException.
 */
1664 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1666 int intf_reg = alloc_preg (cfg);
1668 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1669 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1674 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1678 * Variant of the above that takes a register to the class, not the vtable.
1681 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1683 int intf_bit_reg = alloc_preg (cfg);
1685 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1686 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* branch on success (isinst form) or throw on failure (castclass form) */
1689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1691 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check: throw InvalidCastException unless
 * the class in @klass_reg equals @klass (or the runtime class supplied by
 * @klass_inst, when non-NULL).
 */
1695 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1698 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1699 } else if (cfg->compile_aot) {
1700 int const_reg = alloc_preg (cfg);
1701 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1702 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1706 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a runtime class instruction. */
1710 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1712 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the class in @klass_reg with @klass and branch to @target using
 * @branch_op (e.g. OP_PBEQ/OP_PBNE_UN) instead of throwing.
 */
1716 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1718 if (cfg->compile_aot) {
1719 int const_reg = alloc_preg (cfg);
1720 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* forward declaration: mini_emit_castclass_inst recurses via this wrapper */
1729 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in @klass_reg against @klass, throwing
 * InvalidCastException on mismatch. Handles two shapes (the array branch's
 * opening "if" is elided in this extracted listing): arrays (rank + element
 * class checks, with special-casing for enums per ECMA array covariance
 * rules) and ordinary classes (supertypes-array subtype check).
 */
1732 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1735 int rank_reg = alloc_preg (cfg);
1736 int eclass_reg = alloc_preg (cfg);
/* runtime class instances are not supported for the array path */
1738 g_assert (!klass_inst);
1739 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1741 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1742 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1743 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1744 if (klass->cast_class == mono_defaults.object_class) {
/* casting to object[]: reject enum element types (their cast_class is the
 * enum base), accept everything whose parent isn't Enum's parent */
1745 int parent_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1747 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1748 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1749 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1750 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1751 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1752 } else if (klass->cast_class == mono_defaults.enum_class) {
1753 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1754 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1755 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1757 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1758 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1761 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1762 /* Check that the object is a vector too */
1763 int bounds_reg = alloc_preg (cfg);
1764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1766 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: same supertypes-array walk as mini_emit_isninst_cast_inst,
 * but failures throw instead of branching */
1769 int idepth_reg = alloc_preg (cfg);
1770 int stypes_reg = alloc_preg (cfg);
1771 int stype = alloc_preg (cfg);
1773 mono_class_setup_supertypes (klass);
1775 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1776 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1778 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1780 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1782 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without a runtime class instruction. */
1787 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1789 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that sets @size bytes at [@destreg + @offset] to @val
 * (currently only 0 is supported). Uses immediate stores for small aligned
 * sizes, otherwise wide register stores stepped down to byte stores.
 * NOTE(review): loop constructs and offset/size bookkeeping lines are elided
 * in this extracted listing; code kept verbatim.
 */
1793 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1797 g_assert (val == 0);
/* small, aligned case: a single immediate store of the exact width */
1802 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1805 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1808 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1811 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1813 #if SIZEOF_REGISTER == 8
1815 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize val in a register once, then store repeatedly */
1821 val_reg = alloc_preg (cfg);
1823 if (SIZEOF_REGISTER == 8)
1824 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1826 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1829 /* This could be optimized further if necessary */
/* unaligned prefix handled byte-by-byte */
1831 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1838 #if !NO_UNALIGNED_ACCESS
1839 if (SIZEOF_REGISTER == 8) {
1841 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying @size bytes from [@srcreg + @soffset] to
 * [@destreg + @doffset], using the widest loads/stores alignment permits.
 * Regions are assumed not to overlap (memcpy, not memmove semantics).
 * NOTE(review): loop constructs and offset/size bookkeeping lines are elided
 * in this extracted listing; code kept verbatim.
 */
1871 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1878 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1879 g_assert (size < 10000);
1882 /* This could be optimized further if necessary */
/* unaligned prefix copied byte-by-byte */
1884 cur_reg = alloc_preg (cfg);
1885 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1893 #if !NO_UNALIGNED_ACCESS
/* bulk copy in register-sized chunks where unaligned access is allowed */
1894 if (SIZEOF_REGISTER == 8) {
1896 cur_reg = alloc_preg (cfg);
1897 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1907 cur_reg = alloc_preg (cfg);
1908 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1915 cur_reg = alloc_preg (cfg);
1916 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1923 cur_reg = alloc_preg (cfg);
1924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores @sreg1 into the TLS slot identified by @tls_key.
 * AOT uses an indirect form (offset loaded via a patched constant); the JIT
 * bakes the resolved offset into the instruction.
 */
1933 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1937 if (cfg->compile_aot) {
1938 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1939 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1941 ins->sreg2 = c->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1944 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1946 ins->inst_offset = mini_get_tls_offset (tls_key);
1947 MONO_ADD_INS (cfg->cbb, ins);
1954 * Emit IR to push the current LMF onto the LMF stack.
1957 emit_push_lmf (MonoCompile *cfg)
1960 * Emit IR to push the LMF:
1961 * lmf_addr = <lmf_addr from tls>
1962 * lmf->lmf_addr = lmf_addr
1963 * lmf->prev_lmf = *lmf_addr
1966 int lmf_reg, prev_lmf_reg;
1967 MonoInst *ins, *lmf_ins;
/* Fast path: when the LMF itself lives in TLS, link this frame's MonoLMF
 * directly through the TLS slot — no lmf_addr indirection needed. */
1972 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1973 /* Load current lmf */
1974 lmf_ins = mono_get_lmf_intrinsic (cfg);
1976 MONO_ADD_INS (cfg->cbb, lmf_ins);
1977 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1978 lmf_reg = ins->dreg;
1979 /* Save previous_lmf */
1980 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
1982 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1985 * Store lmf_addr in a variable, so it can be allocated to a global register.
1987 if (!cfg->lmf_addr_var)
1988 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr; strategies (platform-dependent, #ifdef lines elided in
 * this extracted listing): jit_tls intrinsic, lmf_addr intrinsic, inlined
 * pthread_getspecific, or the mono_get_lmf_addr icall fallback. */
1991 ins = mono_get_jit_tls_intrinsic (cfg);
1993 int jit_tls_dreg = ins->dreg;
1995 MONO_ADD_INS (cfg->cbb, ins);
1996 lmf_reg = alloc_preg (cfg);
1997 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1999 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2002 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2004 MONO_ADD_INS (cfg->cbb, lmf_ins);
2007 MonoInst *args [16], *jit_tls_ins, *ins;
2009 /* Inline mono_get_lmf_addr () */
2010 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2012 /* Load mono_jit_tls_id */
2013 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2014 /* call pthread_getspecific () */
2015 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2016 /* lmf_addr = &jit_tls->lmf */
2017 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2020 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2024 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2026 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2027 lmf_reg = ins->dreg;
2029 prev_lmf_reg = alloc_preg (cfg);
2030 /* Save previous_lmf */
2031 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2032 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = &this frame's MonoLMF */
2034 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2041 * Emit IR to pop the current LMF from the LMF stack.
2044 emit_pop_lmf (MonoCompile *cfg)
2046 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2052 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2053 lmf_reg = ins->dreg;
/* Fast path mirrors emit_push_lmf (): restore previous_lmf straight into TLS. */
2055 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2056 /* Load previous_lmf */
2057 prev_lmf_reg = alloc_preg (cfg);
2058 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
2060 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2063 * Emit IR to pop the LMF:
2064 * *(lmf->lmf_addr) = lmf->prev_lmf
2066 /* This could be called before emit_push_lmf () */
2067 if (!cfg->lmf_addr_var)
2068 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2069 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2071 prev_lmf_reg = alloc_preg (cfg);
2072 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2073 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to @func with the current method as the
 * single argument, when enter/leave profiling is enabled.
 */
2078 emit_instrumentation_call (MonoCompile *cfg, void *func)
2080 MonoInst *iargs [1];
2083 * Avoid instrumenting inlined methods since it can
2084 * distort profiling results.
2086 if (cfg->method != cfg->current_method)
2089 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2090 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2091 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Pick the call IR opcode variant for return type @type, in three flavors:
 * calli (indirect via register), virt (vtable membase), or direct.
 * OP_CALL / OP_LCALL / OP_FCALL / OP_VCALL / OP_VOIDCALL correspond to
 * int-sized, 64-bit, float, valuetype, and void results respectively.
 * NOTE(review): several case labels are elided in this extracted listing.
 */
2096 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized (elided "if (type->byref)" guard above) */
2099 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2102 type = mini_get_basic_type_from_generic (gsctx, type);
2103 type = mini_replace_type (type);
2104 switch (type->type) {
2105 case MONO_TYPE_VOID:
2106 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2109 case MONO_TYPE_BOOLEAN:
2112 case MONO_TYPE_CHAR:
2115 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2119 case MONO_TYPE_FNPTR:
2120 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2121 case MONO_TYPE_CLASS:
2122 case MONO_TYPE_STRING:
2123 case MONO_TYPE_OBJECT:
2124 case MONO_TYPE_SZARRAY:
2125 case MONO_TYPE_ARRAY:
2126 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2129 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2132 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2133 case MONO_TYPE_VALUETYPE:
/* enums re-classify as their underlying integral type */
2134 if (type->data.klass->enumtype) {
2135 type = mono_class_enum_basetype (type->data.klass);
2138 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2139 case MONO_TYPE_TYPEDBYREF:
2140 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2141 case MONO_TYPE_GENERICINST:
/* retry with the generic type definition (loops via elided goto) */
2142 type = &type->data.generic_class->container_class->byval_arg;
2145 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are treated as valuetype returns */
2147 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2149 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2155 * target_type_is_incompatible:
2156 * @cfg: MonoCompile context
2158 * Check that the item @arg on the evaluation stack can be stored
2159 * in the target type (can be a local, or field, etc).
2160 * The cfg arg can be used to check if we need verification or just
2163 * Returns: non-0 value if arg can't be stored on a target.
2166 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2168 MonoType *simple_type;
2171 target = mini_replace_type (target);
2172 if (target->byref) {
2173 /* FIXME: check that the pointed to types match */
2174 if (arg->type == STACK_MP)
2175 return arg->klass != mono_class_from_mono_type (target);
2176 if (arg->type == STACK_PTR)
/* "return 0/1" lines for many branches below are elided in this listing */
2181 simple_type = mono_type_get_underlying_type (target);
2182 switch (simple_type->type) {
2183 case MONO_TYPE_VOID:
2187 case MONO_TYPE_BOOLEAN:
2190 case MONO_TYPE_CHAR:
2193 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2197 /* STACK_MP is needed when setting pinned locals */
2198 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2203 case MONO_TYPE_FNPTR:
2205 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2206 * in native int. (#688008).
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2211 case MONO_TYPE_CLASS:
2212 case MONO_TYPE_STRING:
2213 case MONO_TYPE_OBJECT:
2214 case MONO_TYPE_SZARRAY:
2215 case MONO_TYPE_ARRAY:
2216 if (arg->type != STACK_OBJ)
2218 /* FIXME: check type compatibility */
2222 if (arg->type != STACK_I8)
2227 if (arg->type != STACK_R8)
2230 case MONO_TYPE_VALUETYPE:
/* valuetypes must match exactly, class for class */
2231 if (arg->type != STACK_VTYPE)
2233 klass = mono_class_from_mono_type (simple_type);
2234 if (klass != arg->klass)
2237 case MONO_TYPE_TYPEDBYREF:
2238 if (arg->type != STACK_VTYPE)
2240 klass = mono_class_from_mono_type (simple_type);
2241 if (klass != arg->klass)
2244 case MONO_TYPE_GENERICINST:
2245 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2246 if (arg->type != STACK_VTYPE)
2248 klass = mono_class_from_mono_type (simple_type);
2249 if (klass != arg->klass)
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2260 g_assert (cfg->generic_sharing_context);
2261 if (mini_type_var_is_vt (cfg, simple_type)) {
2262 if (arg->type != STACK_VTYPE)
2265 if (arg->type != STACK_OBJ)
2270 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2276 * Prepare arguments for passing to a function call.
2277 * Return a non-zero value if the arguments can't be passed to the given
2279 * The type checks are not yet complete and some conversions may need
2280 * casts on 32 or 64 bit architectures.
2282 * FIXME: implement this using target_type_is_incompatible ()
2285 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2287 MonoType *simple_type;
/* implicit 'this' (elided sig->hasthis guard): must be a reference or pointer */
2291 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2295 for (i = 0; i < sig->param_count; ++i) {
2296 if (sig->params [i]->byref) {
2297 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2301 simple_type = sig->params [i];
2302 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* "return 1" lines for mismatch branches below are elided in this listing */
2304 switch (simple_type->type) {
2305 case MONO_TYPE_VOID:
2310 case MONO_TYPE_BOOLEAN:
2313 case MONO_TYPE_CHAR:
2316 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2322 case MONO_TYPE_FNPTR:
2323 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2326 case MONO_TYPE_CLASS:
2327 case MONO_TYPE_STRING:
2328 case MONO_TYPE_OBJECT:
2329 case MONO_TYPE_SZARRAY:
2330 case MONO_TYPE_ARRAY:
2331 if (args [i]->type != STACK_OBJ)
2336 if (args [i]->type != STACK_I8)
2341 if (args [i]->type != STACK_R8)
2344 case MONO_TYPE_VALUETYPE:
2345 if (simple_type->data.klass->enumtype) {
/* re-check against the enum's underlying type (loops via elided goto) */
2346 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2349 if (args [i]->type != STACK_VTYPE)
2352 case MONO_TYPE_TYPEDBYREF:
2353 if (args [i]->type != STACK_VTYPE)
2356 case MONO_TYPE_GENERICINST:
2357 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2360 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes */
2362 if (args [i]->type != STACK_VTYPE)
2366 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the "return OP_*CALL" lines are elided in this extracted
 * listing; only the case labels survive. Code kept verbatim.
 */
2374 callvirt_to_call (int opcode)
2377 case OP_CALL_MEMBASE:
2379 case OP_VOIDCALL_MEMBASE:
2381 case OP_FCALL_MEMBASE:
2383 case OP_VCALL_MEMBASE:
2385 case OP_LCALL_MEMBASE:
2388 g_assert_not_reached ();
2394 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the runtime value @imt_arg or a
 * constant for @method) into a register and attach it to @call as an
 * out-of-band argument. LLVM and non-LLVM paths differ in how the register
 * is wired into the call.
 */
2396 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2400 if (COMPILE_LLVM (cfg)) {
2401 method_reg = alloc_preg (cfg);
2404 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2405 } else if (cfg->compile_aot) {
2406 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2409 MONO_INST_NEW (cfg, ins, OP_PCONST);
2410 ins->inst_p0 = method;
2411 ins->dreg = method_reg;
2412 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM: record the register on the call and pin it */
2416 call->imt_arg_reg = method_reg;
2418 #ifdef MONO_ARCH_IMT_REG
2419 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2421 /* Need this to keep the IMT arg alive */
2422 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same constant/move logic, then either the dedicated IMT
 * register or the arch-specific fallback */
2427 #ifdef MONO_ARCH_IMT_REG
2428 method_reg = alloc_preg (cfg);
2431 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2432 } else if (cfg->compile_aot) {
2433 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2436 MONO_INST_NEW (cfg, ins, OP_PCONST);
2437 ins->inst_p0 = method;
2438 ins->dreg = method_reg;
2439 MONO_ADD_INS (cfg->cbb, ins);
2442 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2444 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from @mp describing a patch of @type at IL offset
 * @ip targeting @target. (Field assignments for ip/type are elided in this
 * extracted listing.)
 */
2448 static MonoJumpInfo *
2449 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2451 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2455 ji->data.target = target;
/* Context-used flags for @klass, but only when compiling with generic
 * sharing; otherwise 0 (the elided fallback return). */
2461 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2463 if (cfg->generic_sharing_context)
2464 return mono_class_check_context_used (klass);
/* Same as mini_class_check_context_used (), for methods. */
2470 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2472 if (cfg->generic_sharing_context)
2473 return mono_method_check_context_used (method);
2479 * check_method_sharing:
2481 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2484 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2486 gboolean pass_vtable = FALSE;
2487 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods of generic classes have no 'this' to recover
 * the vtable from, so it may have to be passed explicitly */
2489 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2490 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2491 gboolean sharable = FALSE;
2493 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2496 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2497 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2498 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2500 sharable = sharing_enabled && context_sharable;
2504 * Pass vtable iff target method might
2505 * be shared, which means that sharing
2506 * is enabled for its class and its
2507 * context is sharable (and it's not a
2510 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst set) need an mrgctx instead of a vtable */
2514 if (mini_method_get_context (cmethod) &&
2515 mini_method_get_context (cmethod)->method_inst) {
2516 g_assert (!pass_vtable);
2518 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2521 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2522 MonoGenericContext *context = mini_method_get_context (cmethod);
2523 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2525 if (sharing_enabled && context_sharable)
2527 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2532 if (out_pass_vtable)
2533 *out_pass_vtable = pass_vtable;
2534 if (out_pass_mrgctx)
2535 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create and initialize the MonoCallInst for a call with signature SIG and
 * arguments ARGS.  CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the call
 * flavour.  Handles tail calls, valuetype returns (via vret_addr or
 * OP_OUTARG_VTRETADDR), the soft-float r4 argument fallback, and delegates
 * the low-level argument setup to the LLVM or arch backend.
 * NOTE(review): this listing is sampled — several lines of the original body
 * (braces, else branches, the return) are missing between the visible
 * statements.
 */
2538 inline static MonoCallInst *
2539 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2540 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2544 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* A tail call leaves the method, so the profiler leave event is emitted first. */
2549 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2551 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2553 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2556 call->signature = sig;
2557 call->rgctx_reg = rgctx;
2558 sig_ret = mini_replace_type (sig->ret);
2560 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return, first variant: reuse the caller-provided vret_addr. */
2563 if (mini_type_is_vtype (cfg, sig_ret)) {
2564 call->vret_var = cfg->vret_addr;
2565 //g_assert_not_reached ();
/* Valuetype return, second variant: allocate a local and materialize its
 * address lazily through OP_OUTARG_VTRETADDR (see comment below). */
2567 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2568 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2571 temp->backend.is_pinvoke = sig->pinvoke;
2574 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2575 * address of return value to increase optimization opportunities.
2576 * Before vtype decomposition, the dreg of the call ins itself represents the
2577 * fact the call modifies the return value. After decomposition, the call will
2578 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2579 * will be transformed into an LDADDR.
2581 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2582 loada->dreg = alloc_preg (cfg);
2583 loada->inst_p0 = temp;
2584 /* We reference the call too since call->dreg could change during optimization */
2585 loada->inst_p1 = call;
2586 MONO_ADD_INS (cfg->cbb, loada);
2588 call->inst.dreg = temp->dreg;
2590 call->vret_var = loada;
2591 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2592 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2594 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2595 if (COMPILE_SOFT_FLOAT (cfg)) {
2597 * If the call has a float argument, we would need to do an r8->r4 conversion using
2598 * an icall, but that cannot be done during the call sequence since it would clobber
2599 * the call registers + the stack. So we do it before emitting the call.
2601 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2603 MonoInst *in = call->args [i];
2605 if (i >= sig->hasthis)
2606 t = sig->params [i - sig->hasthis];
2608 t = &mono_defaults.int_class->byval_arg;
2609 t = mono_type_get_underlying_type (t);
2611 if (!t->byref && t->type == MONO_TYPE_R4) {
2612 MonoInst *iargs [1];
2616 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2618 /* The result will be in an int vreg */
2619 call->args [i] = conv;
2625 call->need_unbox_trampoline = unbox_trampoline;
/* Let the LLVM backend or the native arch backend lay out the arguments. */
2628 if (COMPILE_LLVM (cfg))
2629 mono_llvm_emit_call (cfg, call);
2631 mono_arch_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing parameter area any call in this method needs. */
2636 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2637 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument (in RGCTX_REG) to CALL: through the dedicated
 * MONO_ARCH_RGCTX_REG when the architecture has one, otherwise by recording
 * it in call->rgctx_arg_reg.
 */
2643 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2645 #ifdef MONO_ARCH_RGCTX_REG
2646 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2647 cfg->uses_rgctx_reg = TRUE;
2648 call->rgctx_reg = TRUE;
2650 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG.  IMT_ARG and
 * RGCTX_ARG, when non-NULL, are passed as hidden arguments.  When pinvoke
 * call-convention checking is enabled for a managed-to-native wrapper, the
 * stack pointer is saved before the call and compared after it, throwing
 * ExecutionEngineException on an imbalance.
 * NOTE(review): sampled listing — some lines of the body are missing.
 */
2657 inline static MonoInst*
2658 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2663 gboolean check_sp = FALSE;
2665 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2666 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2668 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2673 rgctx_reg = mono_alloc_preg (cfg);
2674 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the pre-call stack pointer into a dedicated local. */
2678 if (!cfg->stack_inbalance_var)
2679 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2681 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2682 ins->dreg = cfg->stack_inbalance_var->dreg;
2683 MONO_ADD_INS (cfg->cbb, ins);
2686 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2688 call->inst.sreg1 = addr->dreg;
2691 emit_imt_argument (cfg, call, NULL, imt_arg);
2693 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Read the post-call stack pointer for comparison with the saved one. */
2698 sp_reg = mono_alloc_preg (cfg);
2700 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2702 MONO_ADD_INS (cfg->cbb, ins);
2704 /* Restore the stack so we don't crash when throwing the exception */
2705 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2706 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2707 MONO_ADD_INS (cfg->cbb, ins);
2709 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2714 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2716 return (MonoInst*)call;
2720 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2723 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual or tail) call to METHOD with signature SIG and
 * arguments ARGS.  A non-NULL THIS selects virtual dispatch; IMT_ARG and
 * RGCTX_ARG are optional hidden arguments.  Handles remoting
 * invoke-with-check wrappers, the delegate Invoke fast path,
 * devirtualization of non-virtual/final methods, and IMT- or
 * vtable-slot-based dispatch.  Returns the call instruction.
 * NOTE(review): sampled listing — braces, else branches and some statements
 * are missing between the visible lines.
 */
2728 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2729 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2731 #ifndef DISABLE_REMOTING
2732 gboolean might_be_remote = FALSE;
2734 gboolean virtual = this != NULL;
2735 gboolean enable_for_aot = TRUE;
2739 gboolean need_unbox_trampoline;
2742 sig = mono_method_signature (method);
/* Keep the rgctx argument alive in its own vreg until the call is emitted. */
2745 rgctx_reg = mono_alloc_preg (cfg);
2746 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2749 if (method->string_ctor) {
2750 /* Create the real signature */
2751 /* FIXME: Cache these */
2752 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2753 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2758 context_used = mini_method_check_context_used (cfg, method);
2760 #ifndef DISABLE_REMOTING
/* Non-virtual calls on a MarshalByRef (or object-class) 'this' may have to
 * go through the remoting invoke-with-check wrapper. */
2761 might_be_remote = this && sig->hasthis &&
2762 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2763 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
/* Under generic sharing the wrapper address comes from the rgctx and the
 * call becomes indirect. */
2765 if (might_be_remote && context_used) {
2768 g_assert (cfg->generic_sharing_context);
2770 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2772 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Object/interface methods can be reached with a boxed valuetype receiver,
 * in which case an unbox trampoline is needed. */
2776 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2778 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2780 #ifndef DISABLE_REMOTING
2781 if (might_be_remote)
2782 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2785 call->method = method;
2786 call->inst.flags |= MONO_INST_HAS_METHOD;
2787 call->inst.inst_left = this;
2788 call->tail_call = tail;
2791 int vtable_reg, slot_reg, this_reg;
2794 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: jump through delegate->invoke_impl. */
2796 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2797 MonoInst *dummy_use;
2799 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2801 /* Make a call to delegate->invoke_impl */
2802 call->inst.inst_basereg = this_reg;
2803 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2804 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2806 /* We must emit a dummy use here because the delegate trampoline will
2807 replace the 'this' argument with the delegate target making this activation
2808 no longer a root for the delegate.
2809 This is an issue for delegates that target collectible code such as dynamic
2810 methods of GC'able assemblies.
2812 For a test case look into #667921.
2814 FIXME: a dummy use is not the best way to do it as the local register allocator
2815 will put it on a caller save register and spil it around the call.
2816 Ideally, we would either put it on a callee save register or only do the store part.
2818 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2820 return (MonoInst*)call;
/* Direct-call path: the method is non-virtual, or virtual-but-final, so we
 * can devirtualize after a null check on 'this'. */
2823 if ((!cfg->compile_aot || enable_for_aot) &&
2824 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2825 (MONO_METHOD_IS_FINAL (method) &&
2826 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2827 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2829 * the method is not virtual, we just need to ensure this is not null
2830 * and then we can call the method directly.
2832 #ifndef DISABLE_REMOTING
2833 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2835 * The check above ensures method is not gshared, this is needed since
2836 * gshared methods can't have wrappers.
2838 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2842 if (!method->string_ctor)
2843 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2845 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2846 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2848 * the method is virtual, but we can statically dispatch since either
2849 * it's class or the method itself are sealed.
2850 * But first we need to ensure it's not a null reference.
2852 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2854 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and call through an IMT or vtable slot. */
2856 vtable_reg = alloc_preg (cfg);
2857 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2858 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: IMT slots live at negative offsets from the vtable. */
2861 guint32 imt_slot = mono_method_get_imt_slot (method);
2862 emit_imt_argument (cfg, call, call->method, imt_arg);
2863 slot_reg = vtable_reg;
2864 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2866 if (slot_reg == -1) {
2867 slot_reg = alloc_preg (cfg);
2868 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2869 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class call: index into the vtable's method table. */
2872 slot_reg = vtable_reg;
2873 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2874 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2876 g_assert (mono_method_signature (method)->generic_param_count);
2877 emit_imt_argument (cfg, call, call->method, imt_arg);
2881 call->inst.sreg1 = slot_reg;
2882 call->inst.inst_offset = offset;
2883 call->virtual = TRUE;
2887 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2890 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2892 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper around mono_emit_method_call_full: call METHOD with
 * its own signature, no tail call and no hidden imt/rgctx arguments.
 */
2896 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2898 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): sampled listing — the lines storing FUNC into the call
 * instruction are not visible here.
 */
2902 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2909 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2912 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2914 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for the address FUNC, going
 * through the icall's wrapper and using its registered signature.
 */
2918 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2920 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2924 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2928 * mono_emit_abs_call:
2930 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2932 inline static MonoInst*
2933 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2934 MonoMethodSignature *sig, MonoInst **args)
2936 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2940 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the ABS address can be resolved later. */
2943 if (cfg->abs_patches == NULL)
2944 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2945 g_hash_table_insert (cfg->abs_patches, ji, ji);
2946 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so it is not treated as a real address. */
2947 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * Sign/zero extend small integer call results of INS when the callee
 * (pinvoke or LLVM-compiled) may leave the upper register bits
 * uninitialized.  NOTE(review): sampled listing — the 'break' statements,
 * default case and returns are not visible here.
 */
2952 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2954 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2955 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2959 * Native code might return non register sized integers
2960 * without initializing the upper bits.
/* Pick the widening conversion matching the narrow load of the return type. */
2962 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2963 case OP_LOADI1_MEMBASE:
2964 widen_op = OP_ICONV_TO_I1;
2966 case OP_LOADU1_MEMBASE:
2967 widen_op = OP_ICONV_TO_U1;
2969 case OP_LOADI2_MEMBASE:
2970 widen_op = OP_ICONV_TO_I2;
2972 case OP_LOADU2_MEMBASE:
2973 widen_op = OP_ICONV_TO_U2;
2979 if (widen_op != -1) {
2980 int dreg = alloc_preg (cfg);
2983 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2984 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Lazily look up and cache the managed 3-argument "memcpy" helper on the
 * corlib string class; aborts via g_error if corlib does not provide it.
 */
2994 get_memcpy_method (void)
2996 static MonoMethod *memcpy_method = NULL;
2997 if (!memcpy_method) {
2998 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3000 g_error ("Old corlib found. Install a new one");
3002 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Build a bitmap in *WB_BITMAP with one bit per pointer-sized slot
 * (starting at OFFSET) for every reference field of KLASS, recursing into
 * embedded value types with references.  Static fields are skipped.
 * Used by the GC value-copy write-barrier helper.
 */
3006 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3008 MonoClassField *field;
3009 gpointer iter = NULL;
3011 while ((field = mono_class_get_fields (klass, &iter))) {
3014 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the (absent) MonoObject header. */
3016 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3017 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3018 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3019 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3021 MonoClass *field_class = mono_class_from_mono_type (field->type);
3022 if (field_class->has_references)
3023 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR.  Prefers the
 * arch-supported inline card-table opcode, falls back to manually marking
 * the card byte, and finally to calling the generic managed write-barrier
 * method.  No-op when write barriers are disabled for this compile.
 */
3029 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3031 int card_table_shift_bits;
3032 gpointer card_table_mask;
3034 MonoInst *dummy_use;
3035 int nursery_shift_bits;
3036 size_t nursery_size;
3037 gboolean has_card_table_wb = FALSE;
3039 if (!cfg->gen_write_barriers)
3042 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3044 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3046 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3047 has_card_table_wb = TRUE;
/* Fastest path: single arch-specific card-table barrier instruction. */
3050 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3053 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3054 wbarrier->sreg1 = ptr->dreg;
3055 wbarrier->sreg2 = value->dreg;
3056 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Manual card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
3057 } else if (card_table) {
3058 int offset_reg = alloc_preg (cfg);
3059 int card_reg = alloc_preg (cfg);
3062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3063 if (card_table_mask)
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3066 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3067 * IMM's larger than 32bits.
3069 if (cfg->compile_aot) {
3070 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3072 MONO_INST_NEW (cfg, ins, OP_PCONST);
3073 ins->inst_p0 = card_table;
3074 ins->dreg = card_reg;
3075 MONO_ADD_INS (cfg->cbb, ins);
3078 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3079 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slowest path: call the generic managed write-barrier method. */
3081 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3082 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE live across the barrier. */
3085 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit an inline copy of a KLASS-typed value of SIZE bytes
 * (iargs[0] = dest, iargs[1] = src) while emitting GC write barriers for
 * each reference slot.  Large or misaligned copies are delegated to the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 * NOTE(review): sampled listing — the early 'return FALSE' paths and final
 * return are not visible here; presumably the function reports whether the
 * copy was handled — confirm against the full source.
 */
3089 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3091 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3092 unsigned need_wb = 0;
3097 /*types with references can't have alignment smaller than sizeof(void*) */
3098 if (align < SIZEOF_VOID_P)
3101 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3102 if (size > 32 * SIZEOF_VOID_P)
3105 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3107 /* We don't unroll more than 5 stores to avoid code bloat. */
3108 if (size > 5 * SIZEOF_VOID_P) {
3109 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3110 size += (SIZEOF_VOID_P - 1);
3111 size &= ~(SIZEOF_VOID_P - 1);
3113 EMIT_NEW_ICONST (cfg, iargs [2], size);
3114 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3115 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3119 destreg = iargs [0]->dreg;
3120 srcreg = iargs [1]->dreg;
3123 dest_ptr_reg = alloc_preg (cfg);
3124 tmp_reg = alloc_preg (cfg);
/* Walk dest with a separate cursor register so the barrier sees each slot. */
3127 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3129 while (size >= SIZEOF_VOID_P) {
3130 MonoInst *load_inst;
3131 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3132 load_inst->dreg = tmp_reg;
3133 load_inst->inst_basereg = srcreg;
3134 load_inst->inst_offset = offset;
3135 MONO_ADD_INS (cfg->cbb, load_inst);
3137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3140 emit_write_barrier (cfg, iargs [0], load_inst);
3142 offset += SIZEOF_VOID_P;
3143 size -= SIZEOF_VOID_P;
3146 /*tmp += sizeof (void*)*/
3147 if (size >= SIZEOF_VOID_P) {
3148 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3149 MONO_ADD_INS (cfg->cbb, iargs [0]);
3153 /* Those cannot be references since size < sizeof (void*) */
3155 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3156 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3162 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3163 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3169 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3170 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/* NOTE(review): sampled listing — the header comment below and several body
 * lines (braces, else branches) are incomplete in this view. */
3179 * Emit code to copy a valuetype of type @klass whose address is stored in
3180 * @src->dreg to memory whose address is stored at @dest->dreg.
3183 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3185 MonoInst *iargs [4];
3186 int context_used, n;
3188 MonoMethod *memcpy_method;
3189 MonoInst *size_ins = NULL;
3190 MonoInst *memcpy_ins = NULL;
3194 * This check breaks with spilled vars... need to handle it during verification anyway.
3195 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the size and the memcpy helper are only known at runtime. */
3198 if (mini_is_gsharedvt_klass (cfg, klass)) {
3200 context_used = mini_class_check_context_used (cfg, klass);
3201 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3202 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3206 n = mono_class_native_size (klass, &align);
3208 n = mono_class_value_size (klass, &align);
3210 /* if native is true there should be no references in the struct */
3211 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3212 /* Avoid barriers when storing to the stack */
3213 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3214 (dest->opcode == OP_LDADDR))) {
3220 context_used = mini_class_check_context_used (cfg, klass);
3222 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3223 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3225 } else if (context_used) {
3226 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3228 if (cfg->compile_aot) {
3229 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3231 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3232 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy icalls: gsharedvt variant when size is dynamic. */
3237 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3239 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline small copies, otherwise call the memcpy helper. */
3244 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3245 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3246 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3251 iargs [2] = size_ins;
3253 EMIT_NEW_ICONST (cfg, iargs [2], n);
3255 memcpy_method = get_memcpy_method ();
3257 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3259 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Lazily look up and cache the managed 3-argument "memset" helper on the
 * corlib string class; aborts via g_error if corlib does not provide it.
 */
3264 get_memset_method (void)
3266 static MonoMethod *memset_method = NULL;
3267 if (!memset_method) {
3268 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3270 g_error ("Old corlib found. Install a new one");
3272 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a KLASS-typed value at DEST->dreg.  Uses a
 * runtime-sized bzero helper under gsharedvt, an inline memset for small
 * statically-known sizes, and the managed memset helper otherwise.
 */
3276 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3278 MonoInst *iargs [3];
3279 int n, context_used;
3281 MonoMethod *memset_method;
3282 MonoInst *size_ins = NULL;
3283 MonoInst *bzero_ins = NULL;
3284 static MonoMethod *bzero_method;
3286 /* FIXME: Optimize this for the case when dest is an LDADDR */
3288 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime. */
3289 if (mini_is_gsharedvt_klass (cfg, klass)) {
3290 context_used = mini_class_check_context_used (cfg, klass);
3291 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3292 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3294 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3295 g_assert (bzero_method);
3297 iargs [1] = size_ins;
3298 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3302 n = mono_class_value_size (klass, &align);
/* Small sizes are cleared inline. */
3304 if (n <= sizeof (gpointer) * 5) {
3305 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3308 memset_method = get_memset_method ();
3310 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3311 EMIT_NEW_ICONST (cfg, iargs [2], n);
3312 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context for METHOD: the mrgctx
 * variable for generic methods, the vtable variable for static/valuetype
 * methods (extracting the vtable out of the mrgctx when needed), or the
 * vtable read from the 'this' argument otherwise.
 * NOTE(review): sampled listing — returns and some branches are missing.
 */
3317 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3319 MonoInst *this = NULL;
3321 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can get the context from 'this'. */
3323 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3324 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3325 !method->klass->valuetype)
3326 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Generic methods: load the method runtime generic context variable. */
3328 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3329 MonoInst *mrgctx_loc, *mrgctx_var;
3332 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3334 mrgctx_loc = mono_get_vtable_var (cfg);
3335 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static / valuetype methods: load the vtable variable. */
3338 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3339 MonoInst *vtable_loc, *vtable_var;
3343 vtable_loc = mono_get_vtable_var (cfg);
3344 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable may actually hold an mrgctx; dereference to its vtable. */
3346 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3347 MonoInst *mrgctx_var = vtable_var;
3350 vtable_reg = alloc_preg (cfg);
3351 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3352 vtable_var->type = STACK_PTR;
/* Otherwise read the vtable straight from the 'this' object. */
3360 vtable_reg = alloc_preg (cfg);
3361 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate and fill an rgctx-entry descriptor from mempool MP: METHOD,
 * whether the entry lives in an mrgctx, the patch (PATCH_TYPE/PATCH_DATA)
 * identifying the data, and the rgctx INFO_TYPE.  NOTE(review): the return
 * statement is not visible in this sampled listing.
 */
3366 static MonoJumpInfoRgctxEntry *
3367 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3369 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3370 res->method = method;
3371 res->in_mrgctx = in_mrgctx;
3372 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3373 res->data->type = patch_type;
3374 res->data->data.target = patch_data;
3375 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX and the
 * ENTRY describing which slot to fetch.
 */
3380 static inline MonoInst*
3381 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3383 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR that fetches the RGCTX_TYPE info of KLASS from the runtime
 * generic context of the current method.
 */
3387 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3388 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3390 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3391 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3393 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR that fetches the RGCTX_TYPE info of signature SIG from the
 * runtime generic context of the current method.
 */
3397 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3398 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3400 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3401 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3403 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR that fetches the RGCTX_TYPE info for a gsharedvt call described
 * by (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3407 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3408 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3410 MonoJumpInfoGSharedVtCall *call_info;
3411 MonoJumpInfoRgctxEntry *entry;
3414 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3415 call_info->sig = sig;
3416 call_info->method = cmethod;
3418 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3419 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3421 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR that fetches the gsharedvt INFO of CMETHOD from the runtime
 * generic context of the current method.
 */
3426 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3427 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3429 MonoJumpInfoRgctxEntry *entry;
3432 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3433 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3435 return emit_rgctx_fetch (cfg, rgctx, entry);
3439 * emit_get_rgctx_method:
3441 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3442 * normal constants, else emit a load from the rgctx.
3445 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3446 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic context in use: the value is a compile-time constant. */
3448 if (!context_used) {
3451 switch (rgctx_type) {
3452 case MONO_RGCTX_INFO_METHOD:
3453 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3455 case MONO_RGCTX_INFO_METHOD_RGCTX:
3456 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3459 g_assert_not_reached ();
/* Shared code: fetch the value from the runtime generic context. */
3462 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3463 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3465 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR that fetches the RGCTX_TYPE info of FIELD from the runtime
 * generic context of the current method.
 */
3470 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3471 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3473 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3474 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3476 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Return the index of the (DATA, RGCTX_TYPE) entry in the current method's
 * gsharedvt info template, appending a new entry (growing the array with
 * doubling, starting at 16) when not already present.  LOCAL_OFFSET
 * entries are never deduplicated.  NOTE(review): the return of the found
 * index is not visible in this sampled listing.
 */
3480 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3482 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3483 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing matching entry first. */
3488 for (i = 0; i < info->num_entries; ++i) {
3489 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3491 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array when full (mempool allocation, so no free). */
3495 if (info->num_entries == info->count_entries) {
3496 MonoRuntimeGenericContextInfoTemplate *new_entries;
3497 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3499 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3501 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3502 info->entries = new_entries;
3503 info->count_entries = new_count_entries;
/* Append the new entry. */
3506 idx = info->num_entries;
3507 template = &info->entries [idx];
3508 template->info_type = rgctx_type;
3509 template->data = data;
3511 info->num_entries ++;
3517 * emit_get_gsharedvt_info:
3519 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3522 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3527 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3528 /* Load info->entries [idx] */
3529 dreg = alloc_preg (cfg);
3530 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)))
/*
 * emit_get_gsharedvt_info_klass:
 * Convenience wrapper: load the RGCTX_TYPE gsharedvt info for KLASS's
 * byval type from the gsharedvt info variable.
 */
3536 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3538 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 * Emit a call to the generic class-init trampoline for KLASS, obtaining the
 * vtable either from the rgctx (shared code) or as a compile-time constant,
 * and passing it through MONO_ARCH_VTABLE_REG when available.
 */
3542 * On return the caller must check @klass for load errors.
3545 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3547 MonoInst *vtable_arg;
3551 context_used = mini_class_check_context_used (cfg, klass);
3554 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3555 klass, MONO_RGCTX_INFO_VTABLE);
3557 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3561 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3564 if (COMPILE_LLVM (cfg))
3565 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3567 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3568 #ifdef MONO_ARCH_VTABLE_REG
3569 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3570 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 * Emit a debugger sequence point at IP when sequence points are enabled and
 * METHOD is the method actually being compiled (not an inlined callee).
 * NONEMPTY_STACK flags sequence points taken with values on the eval stack.
 */
3577 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3581 if (cfg->gen_seq_points && cfg->method == method) {
3582 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3584 ins->flags |= MONO_INST_NONEMPTY_STACK;
3585 MONO_ADD_INS (cfg->cbb, ins);
/*
 * When --debug=casts is active, store the source class (read from obj's
 * vtable) and the target KLASS into the JIT TLS data (class_cast_from /
 * class_cast_to), so a failing cast can produce a detailed message.
 * Null objects are skipped via is_null_bb. If out_bblock is non-NULL it
 * receives the current bblock, which may change here.
 */
3592 if (mini_get_debug_options ()->better_cast_details) {
3593 int vtable_reg = alloc_preg (cfg);
3594 int klass_reg = alloc_preg (cfg);
3595 MonoBasicBlock *is_null_bb = NULL;
3597 int to_klass_reg, context_used;
3600 NEW_BBLOCK (cfg, is_null_bb);
/* skip the bookkeeping entirely for a null object */
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3606 tls_get = mono_get_jit_tls_intrinsic (cfg);
3608 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3612 MONO_ADD_INS (cfg->cbb, tls_get);
3613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3616 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* the target class may itself need an rgctx lookup in shared code */
3618 context_used = mini_class_check_context_used (cfg, klass);
3620 MonoInst *class_ins;
3622 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3623 to_klass_reg = class_ins->dreg;
3625 to_klass_reg = alloc_preg (cfg);
3626 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3628 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3631 MONO_START_BB (cfg, is_null_bb);
3633 *out_bblock = cfg->cbb;
/*
 * Undo save_cast_details () after a cast succeeded: clear the TLS slot so a
 * later unrelated failure doesn't report stale cast details.
 */
3641 /* Reset the variables holding the cast details */
3642 if (mini_get_debug_options ()->better_cast_details) {
3643 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3645 MONO_ADD_INS (cfg->cbb, tls_get);
3646 /* It is enough to reset the from field */
3647 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * Emit a runtime check that OBJ is an array of exactly ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stores into covariant
 * arrays). The comparison strategy depends on the compilation mode:
 * - MONO_OPT_SHARED: compare obj's class against the class (const or AOT const)
 * - generic sharing (context_used): compare vtables via an rgctx lookup
 * - otherwise: compare vtables directly (AOT const or immediate)
 */
3655 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3657 int vtable_reg = alloc_preg (cfg);
3660 context_used = mini_class_check_context_used (cfg, array_class);
3662 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* faulting load: also acts as the null check on obj */
3664 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3666 if (cfg->opt & MONO_OPT_SHARED) {
3667 int class_reg = alloc_preg (cfg);
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3669 if (cfg->compile_aot) {
3670 int klass_reg = alloc_preg (cfg);
3671 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3672 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3676 } else if (context_used) {
3677 MonoInst *vtable_ins;
3679 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3680 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3682 if (cfg->compile_aot) {
3686 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3688 vt_reg = alloc_preg (cfg);
3689 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3690 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3693 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3699 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3701 reset_cast_details (cfg);
/*
 * Emit a call to Nullable<T>.Unbox (val). In shared generic code the method
 * address comes from the rgctx and the call is an indirect calli; otherwise a
 * direct call is emitted, with an optional vtable argument when the method
 * requires it (check_method_sharing).
 */
3705 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3706 * generic code is generated.
3709 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3711 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3714 MonoInst *rgctx, *addr;
3716 /* FIXME: What if the class is shared? We might not
3717 have to get the address of the method from the
3719 addr = emit_get_rgctx_method (cfg, context_used, method,
3720 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3722 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3724 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3726 gboolean pass_vtable, pass_mrgctx;
3727 MonoInst *rgctx_arg = NULL;
3729 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3730 g_assert (!pass_mrgctx);
3733 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3736 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3739 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * Emit the IR for CEE_UNBOX: verify the boxed object sp [0] really contains
 * a KLASS (rank must be 0 and element classes must match, else
 * InvalidCastException), then return the address of the value, i.e.
 * obj + sizeof (MonoObject).
 */
3744 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3748 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3749 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3750 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3751 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3753 obj_reg = sp [0]->dreg;
/* faulting load doubles as the null check */
3754 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3757 /* FIXME: generics */
3758 g_assert (klass->rank == 0);
/* an array can never unbox to a value type */
3761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3762 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3768 MonoInst *element_class;
3770 /* This assertion is from the unboxcast insn */
3771 g_assert (klass->rank == 0);
3773 element_class = emit_get_rgctx_klass (cfg, context_used,
3774 klass->element_class, MONO_RGCTX_INFO_KLASS);
3776 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3777 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3779 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3780 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3781 reset_cast_details (cfg);
/* the unboxed value lives just past the object header */
3784 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3785 MONO_ADD_INS (cfg->cbb, add);
3786 add->type = STACK_MP;
/*
 * Unbox when KLASS is a gsharedvt type whose concrete kind is only known at
 * run time. Branches three ways on MONO_RGCTX_INFO_CLASS_BOX_TYPE:
 *   value type (default) -> address is obj + sizeof (MonoObject)
 *   1 = reference type   -> store the ref into a temporary and take its address
 *   2 = Nullable<T>      -> call Nullable<T>.Unbox through an rgctx-provided
 *                           address using a hand-built signature
 * All paths join at end_bb; the result is loaded from addr_reg.
 * out_cbb receives the bblock current after the join.
 */
3795 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3796 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3800 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3806 args [1] = klass_inst;
3809 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3811 NEW_BBLOCK (cfg, is_ref_bb);
3812 NEW_BBLOCK (cfg, is_nullable_bb);
3813 NEW_BBLOCK (cfg, end_bb);
3814 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3818 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3821 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3822 addr_reg = alloc_dreg (cfg, STACK_MP);
3826 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3827 MONO_ADD_INS (cfg->cbb, addr);
3829 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3832 MONO_START_BB (cfg, is_ref_bb);
3834 /* Save the ref to a temporary */
3835 dreg = alloc_ireg (cfg);
3836 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3837 addr->dreg = addr_reg;
3838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3839 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3842 MONO_START_BB (cfg, is_nullable_bb);
3845 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3846 MonoInst *unbox_call;
3847 MonoMethodSignature *unbox_sig;
3850 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* cannot construct Nullable<T>.Unbox's gsharedvt signature at JIT time, so build it by hand */
3852 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3853 unbox_sig->ret = &klass->byval_arg;
3854 unbox_sig->param_count = 1;
3855 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3856 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3858 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3859 addr->dreg = addr_reg;
3862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3865 MONO_START_BB (cfg, end_bb);
3868 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3870 *out_cbb = cfg->cbb;
/*
 * Emit the allocation of a KLASS instance (for_box distinguishes box
 * allocations, which can use a specialized managed allocator). Picks among:
 * - shared-generic path: rgctx lookup of class/vtable + mono_object_new[_specific]
 * - MONO_OPT_SHARED: mono_object_new with an explicit domain + class
 * - AOT out-of-line corlib classes: compact mono_helper_newobj_mscorlib helper
 * - otherwise: the GC's managed allocator if available, else the allocation
 *   function chosen by mono_class_get_allocation_ftn (optionally passing the
 *   instance size in words when pass_lw is set).
 */
3876 * Returns NULL and set the cfg exception on error.
3879 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3881 MonoInst *iargs [2];
3887 MonoInst *iargs [2];
3889 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3891 if (cfg->opt & MONO_OPT_SHARED)
3892 rgctx_info = MONO_RGCTX_INFO_KLASS;
3894 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3895 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3897 if (cfg->opt & MONO_OPT_SHARED) {
3898 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3900 alloc_ftn = mono_object_new;
3903 alloc_ftn = mono_object_new_specific;
3906 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3907 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3909 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3912 if (cfg->opt & MONO_OPT_SHARED) {
3913 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3914 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3916 alloc_ftn = mono_object_new;
3917 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3918 /* This happens often in argument checking code, eg. throw new FooException... */
3919 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3920 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3921 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3923 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3924 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: surface a TypeLoadException for klass */
3928 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3929 cfg->exception_ptr = klass;
3933 #ifndef MONO_CROSS_COMPILE
3934 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3937 if (managed_alloc) {
3938 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3939 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3941 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size rounded up to whole pointers */
3943 guint32 lw = vtable->klass->instance_size;
3944 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3945 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3946 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3949 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3953 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * Emit the IR for CEE_BOX of VAL (of type KLASS). Special cases:
 * - Nullable<T>: call Nullable<T>.Box, via rgctx calli in shared code or a
 *   direct call otherwise (mirrors handle_unbox_nullable).
 * - gsharedvt klass: runtime three-way branch on CLASS_BOX_TYPE — allocate
 *   and copy for vtypes, pass the reference through unchanged for ref types,
 *   or call Nullable<T>.Box through a hand-built signature.
 * - plain case: allocate via handle_alloc and store the value past the
 *   object header.
 * out_cbb receives the bblock current on return.
 */
3960 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3962 MonoInst *alloc, *ins;
3964 *out_cbb = cfg->cbb;
3966 if (mono_class_is_nullable (klass)) {
3967 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3970 /* FIXME: What if the class is shared? We might not
3971 have to get the method address from the RGCTX. */
3972 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3973 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3974 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3976 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3978 gboolean pass_vtable, pass_mrgctx;
3979 MonoInst *rgctx_arg = NULL;
3981 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3982 g_assert (!pass_mrgctx);
3985 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3988 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3991 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3995 if (mini_is_gsharedvt_klass (cfg, klass)) {
3996 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3997 MonoInst *res, *is_ref, *src_var, *addr;
4000 dreg = alloc_ireg (cfg);
4002 NEW_BBLOCK (cfg, is_ref_bb);
4003 NEW_BBLOCK (cfg, is_nullable_bb);
4004 NEW_BBLOCK (cfg, end_bb);
/* 1 = reference type, 2 = Nullable<T>, otherwise value type (same protocol as handle_unbox_gsharedvt) */
4005 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4009 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4013 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4016 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4017 ins->opcode = OP_STOREV_MEMBASE;
4019 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4020 res->type = STACK_OBJ;
4022 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4025 MONO_START_BB (cfg, is_ref_bb);
4026 addr_reg = alloc_ireg (cfg);
4028 /* val is a vtype, so has to load the value manually */
4029 src_var = get_vreg_to_inst (cfg, val->dreg);
4031 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4032 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4037 MONO_START_BB (cfg, is_nullable_bb);
4040 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4041 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4043 MonoMethodSignature *box_sig;
4046 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4047 * construct that method at JIT time, so have to do things by hand.
4049 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4050 box_sig->ret = &mono_defaults.object_class->byval_arg;
4051 box_sig->param_count = 1;
4052 box_sig->params [0] = &klass->byval_arg;
4053 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4054 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4055 res->type = STACK_OBJ;
4059 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4061 MONO_START_BB (cfg, end_bb);
4063 *out_cbb = cfg->cbb;
4067 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4071 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one variant (co-/contra-variant) type parameter
 * instantiated with a reference type — such casts need the variance-aware
 * (cached) cast path rather than a plain class compare.
 */
4081 MonoGenericContainer *container;
4082 MonoGenericInst *ginst;
4084 if (klass->generic_class) {
4085 container = klass->generic_class->container_class->generic_container;
4086 ginst = klass->generic_class->context.class_inst;
4087 } else if (klass->generic_container && context_used) {
4088 container = klass->generic_container;
4089 ginst = container->context.class_inst;
4094 for (i = 0; i < container->type_argc; ++i) {
/* only variant parameters matter; invariant ones cannot change the cast result */
4096 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4098 type = ginst->type_argv [i];
4099 if (mini_type_is_reference (cfg, type))
/* True when an isinst/castclass against KLASS cannot be decided by a simple
 * vtable/class compare: interfaces, arrays, Nullable<T>, MarshalByRef
 * classes, sealed classes and generic type variables all take a slower path.
 * NOTE: function-like macro — evaluates `klass` multiple times; only pass a
 * side-effect-free expression. */
4105 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * Emit a call to the castclass-with-cache marshal wrapper (args are prepared
 * by the caller: object, target class, cache slot). Cast details are saved
 * before the call and reset afterwards; out_bblock tracks the current bblock,
 * which save_cast_details () may change.
 */
4108 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4110 MonoMethod *mono_castclass;
4113 mono_castclass = mono_marshal_get_castclass_with_cache ();
4115 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4116 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4117 reset_cast_details (cfg);
4118 *out_bblock = cfg->cbb;
/*
 * Non-shared variant: build the argument triple (obj, klass const, cache
 * slot) and forward to emit_castclass_with_cache. Under AOT the cache slot is
 * a per-call-site CASTCLASS_CACHE patch (unique index); at JIT time it is a
 * freshly allocated pointer-sized domain slot.
 */
4133 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4136 if (cfg->compile_aot) {
4137 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4138 cfg->castclass_cache_index ++;
4139 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4140 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4142 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4145 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4147 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
/*
 * Emit the IR for CEE_CASTCLASS of SRC to KLASS. Strategy, in order:
 * - variant generic argument, non-shared: cached castclass wrapper
 * - MarshalByRef or interface, non-shared: inline the castclass marshal
 *   wrapper (accounted against *inline_costs)
 * - shared code with a "complex" class (see is_complex_isinst): cached
 *   castclass wrapper with an rgctx-provided cache
 * - otherwise: inline null check + vtable/class comparison, with a fast
 *   equality path for sealed non-array classes.
 * Throws InvalidCastException on failure; null passes through unchanged.
 */
4154 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4156 MonoBasicBlock *is_null_bb;
4157 int obj_reg = src->dreg;
4158 int vtable_reg = alloc_preg (cfg);
4160 MonoInst *klass_inst = NULL, *res;
4161 MonoBasicBlock *bblock;
4165 context_used = mini_class_check_context_used (cfg, klass);
4167 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4168 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4169 (*inline_costs) += 2;
4172 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4173 MonoMethod *mono_castclass;
4174 MonoInst *iargs [1];
4177 mono_castclass = mono_marshal_get_castclass (klass);
4180 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4181 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4182 iargs, ip, cfg->real_offset, TRUE, &bblock);
4183 reset_cast_details (cfg);
4184 CHECK_CFG_EXCEPTION;
4185 g_assert (costs > 0);
4187 cfg->real_offset += 5;
4189 (*inline_costs) += costs;
4198 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4199 MonoInst *cache_ins;
4201 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4206 /* klass - it's the second element of the cache entry*/
4207 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4210 args [2] = cache_ins;
4212 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4215 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* inline path: null objects skip the check entirely */
4218 NEW_BBLOCK (cfg, is_null_bb);
4220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4221 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4223 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4225 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4227 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4229 int klass_reg = alloc_preg (cfg);
4231 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* sealed class: a single pointer compare decides the cast */
4233 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4234 /* the remoting code is broken, access the class for now */
4235 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4236 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4238 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4239 cfg->exception_ptr = klass;
4242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4244 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4247 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4249 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4250 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4254 MONO_START_BB (cfg, is_null_bb);
4256 reset_cast_details (cfg);
/*
 * Emit the IR for CEE_ISINST of SRC against KLASS. Result is the object
 * itself on a match (res_reg is preassigned obj early so the false branch can
 * be if-converted), or NULL on a mismatch. Complex classes go through the
 * cached isinst marshal wrapper; otherwise an inline type test is emitted,
 * with dedicated paths for interfaces, arrays (rank + element-class checks,
 * including the SZARRAY bounds==NULL vector check), Nullable<T>, and a
 * single-compare fast path for sealed classes.
 */
4270 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4273 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4274 int obj_reg = src->dreg;
4275 int vtable_reg = alloc_preg (cfg);
4276 int res_reg = alloc_ireg_ref (cfg);
4277 MonoInst *klass_inst = NULL;
4282 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4283 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4284 MonoInst *cache_ins;
4286 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4291 /* klass - it's the second element of the cache entry*/
4292 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4295 args [2] = cache_ins;
4297 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4300 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4303 NEW_BBLOCK (cfg, is_null_bb);
4304 NEW_BBLOCK (cfg, false_bb);
4305 NEW_BBLOCK (cfg, end_bb);
4307 /* Do the assignment at the beginning, so the other assignment can be if converted */
4308 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4309 ins->type = STACK_OBJ;
4312 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4313 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4315 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4317 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4318 g_assert (!context_used);
4319 /* the is_null_bb target simply copies the input register to the output */
4320 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4322 int klass_reg = alloc_preg (cfg);
4325 int rank_reg = alloc_preg (cfg);
4326 int eclass_reg = alloc_preg (cfg);
/* array path: rank must match, then the element (cast) class is tested */
4328 g_assert (!context_used);
4329 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4333 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4334 if (klass->cast_class == mono_defaults.object_class) {
4335 int parent_reg = alloc_preg (cfg);
4336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4337 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4338 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4339 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4340 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4341 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4342 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4344 } else if (klass->cast_class == mono_defaults.enum_class) {
4345 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4347 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4348 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4350 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4351 /* Check that the object is a vector too */
4352 int bounds_reg = alloc_preg (cfg);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4354 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4355 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4358 /* the is_null_bb target simply copies the input register to the output */
4359 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4361 } else if (mono_class_is_nullable (klass)) {
4362 g_assert (!context_used);
4363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4364 /* the is_null_bb target simply copies the input register to the output */
4365 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4367 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4368 g_assert (!context_used);
4369 /* the remoting code is broken, access the class for now */
4370 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4371 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4373 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4374 cfg->exception_ptr = klass;
4377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4386 /* the is_null_bb target simply copies the input register to the output */
4387 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false branch: overwrite the pre-assigned result with NULL */
4392 MONO_START_BB (cfg, false_bb);
4394 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4395 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4397 MONO_START_BB (cfg, is_null_bb);
4399 MONO_START_BB (cfg, end_bb);
/*
 * Emit the IR for the internal CISINST opcode (remoting-aware isinst).
 * Result (an I4 in dreg): 0 = instance, 1 = not an instance, 2 = transparent
 * proxy whose type cannot be determined. With DISABLE_REMOTING only the
 * 0/1 outcomes exist and proxy handling is compiled out.
 */
4407 /* This opcode takes as input an object reference and a class, and returns:
4408 0) if the object is an instance of the class,
4409 1) if the object is not instance of the class,
4410 2) if the object is a proxy whose type cannot be determined */
4413 #ifndef DISABLE_REMOTING
4414 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4416 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4418 int obj_reg = src->dreg;
4419 int dreg = alloc_ireg (cfg);
4421 #ifndef DISABLE_REMOTING
4422 int klass_reg = alloc_preg (cfg);
4425 NEW_BBLOCK (cfg, true_bb);
4426 NEW_BBLOCK (cfg, false_bb);
4427 NEW_BBLOCK (cfg, end_bb);
4428 #ifndef DISABLE_REMOTING
4429 NEW_BBLOCK (cfg, false2_bb);
4430 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
4433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4434 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4436 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4437 #ifndef DISABLE_REMOTING
4438 NEW_BBLOCK (cfg, interface_fail_bb);
4441 tmp_reg = alloc_preg (cfg);
4442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4443 #ifndef DISABLE_REMOTING
/* interface test failed: maybe it's a proxy with custom type info */
4444 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4445 MONO_START_BB (cfg, interface_fail_bb);
4446 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4448 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4450 tmp_reg = alloc_preg (cfg);
4451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4455 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4458 #ifndef DISABLE_REMOTING
4459 tmp_reg = alloc_preg (cfg);
4460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* non-interface path: for proxies, test against the remote proxy_class instead */
4463 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4464 tmp_reg = alloc_preg (cfg);
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4468 tmp_reg = alloc_preg (cfg);
4469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4470 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4473 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4476 MONO_START_BB (cfg, no_proxy_bb);
4478 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4480 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4484 MONO_START_BB (cfg, false_bb);
4486 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4487 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4489 #ifndef DISABLE_REMOTING
4490 MONO_START_BB (cfg, false2_bb);
4492 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4493 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4496 MONO_START_BB (cfg, true_bb);
4498 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4500 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 on the evaluation stack */
4503 MONO_INST_NEW (cfg, ins, OP_ICONST);
4505 ins->type = STACK_I4;
/*
 * Emit the IR for the internal CCASTCLASS opcode (remoting-aware castclass).
 * Result (an I4 in dreg): 0 = instance of the class (null also yields 0),
 * 1 = transparent proxy whose type cannot be determined; any other case
 * throws InvalidCastException. With DISABLE_REMOTING the proxy paths are
 * compiled out and failure always throws.
 */
4513 /* This opcode takes as input an object reference and a class, and returns:
4514 0) if the object is an instance of the class,
4515 1) if the object is a proxy whose type cannot be determined
4516 an InvalidCastException exception is thrown otherwhise*/
4519 #ifndef DISABLE_REMOTING
4520 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4522 MonoBasicBlock *ok_result_bb;
4524 int obj_reg = src->dreg;
4525 int dreg = alloc_ireg (cfg);
4526 int tmp_reg = alloc_preg (cfg);
4528 #ifndef DISABLE_REMOTING
4529 int klass_reg = alloc_preg (cfg);
4530 NEW_BBLOCK (cfg, end_bb);
4533 NEW_BBLOCK (cfg, ok_result_bb);
/* a null reference casts successfully (result 0) */
4535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4538 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4540 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4541 #ifndef DISABLE_REMOTING
4542 NEW_BBLOCK (cfg, interface_fail_bb);
4544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* interface test failed: only a proxy with custom type info survives (result 1) */
4545 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4546 MONO_START_BB (cfg, interface_fail_bb);
4547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4549 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4551 tmp_reg = alloc_preg (cfg);
4552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4554 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4556 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4560 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4564 #ifndef DISABLE_REMOTING
4565 NEW_BBLOCK (cfg, no_proxy_bb);
4567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* non-interface path: proxies are tested against the remote proxy_class */
4569 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4571 tmp_reg = alloc_preg (cfg);
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4575 tmp_reg = alloc_preg (cfg);
4576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4580 NEW_BBLOCK (cfg, fail_1_bb);
4582 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4584 MONO_START_BB (cfg, fail_1_bb);
4586 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4587 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4589 MONO_START_BB (cfg, no_proxy_bb);
4591 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4593 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4597 MONO_START_BB (cfg, ok_result_bb);
4599 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4601 #ifndef DISABLE_REMOTING
4602 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 on the evaluation stack */
4606 MONO_INST_NEW (cfg, ins, OP_ICONST);
4608 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * fill in its target/method/invoke_impl/method_ptr fields and return the
 * new object.  CONTEXT_USED is non-zero under generic sharing; VIRTUAL
 * selects the virtual-delegate trampoline.
 * Returns NULL and sets the cfg exception on error.
 */
4616 static G_GNUC_UNUSED MonoInst*
4617 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4621 gpointer trampoline;
4622 MonoInst *obj, *method_ins, *tramp_ins;
4626 // FIXME reenable optimisation for virtual case
4631 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4634 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate instance itself. */
4638 obj = handle_alloc (cfg, klass, FALSE, 0);
4642 /* Inline the contents of mono_delegate_ctor */
4644 /* Set target field */
4645 /* Optimize away setting of NULL target */
4646 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4647 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into the heap needs a GC write barrier. */
4648 if (cfg->gen_write_barriers) {
4649 dreg = alloc_preg (cfg);
4650 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4651 emit_write_barrier (cfg, ptr, target);
4655 /* Set method field */
4656 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4660 * To avoid looking up the compiled code belonging to the target method
4661 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4662 * store it, and we fill it after the method has been compiled.
4664 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4665 MonoInst *code_slot_ins;
4668 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash; guarded by the
 * domain lock since other compilations may touch it concurrently. */
4670 domain = mono_domain_get ();
4671 mono_domain_lock (domain);
4672 if (!domain_jit_info (domain)->method_code_hash)
4673 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4674 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4676 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4677 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4679 mono_domain_unlock (domain);
/* Under AOT the slot address is only known at load time, so emit a patch. */
4681 if (cfg->compile_aot)
4682 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4684 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT uses a patch-info constant, the JIT
 * creates it eagerly.  With CONTEXT_USED the concrete method is unknown. */
4689 if (cfg->compile_aot) {
4690 MonoDelegateClassMethodPair *del_tramp;
4692 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4693 del_tramp->klass = klass;
4694 del_tramp->method = context_used ? NULL : method;
4695 del_tramp->virtual = virtual;
4696 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4699 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4701 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4702 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4705 /* Set invoke_impl field */
4707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT path: copy invoke_impl and method_ptr out of the tramp info. */
4709 dreg = alloc_preg (cfg);
4710 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4713 dreg = alloc_preg (cfg);
4714 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4715 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4718 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array; SP holds the dimension arguments.  The call uses
 * a vararg convention, which disables LLVM compilation for this method.
 */
4724 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4726 MonoJitICallInfo *info;
4728 /* Need to register the icall so it gets an icall wrapper */
4729 info = mono_get_array_new_va_icall (rank);
4731 cfg->flags |= MONO_CFG_HAS_VARARGS;
4733 /* mono_array_new_va () needs a vararg calling convention */
4734 cfg->disable_llvm = TRUE;
4736 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4737 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block.  No-op when there is no got_var or it was already
 * allocated.  Also emits a dummy use in the exit block so liveness analysis
 * keeps the variable alive for the whole method.
 */
4741 mono_emit_load_got_addr (MonoCompile *cfg)
4743 MonoInst *getaddr, *dummy_use;
4745 if (!cfg->got_var || cfg->got_var_allocated)
4748 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4749 getaddr->cil_code = cfg->header->code;
4750 getaddr->dreg = cfg->got_var->dreg;
4752 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code, so the GOT load
 * executes before anything that might need it. */
4753 if (cfg->bb_entry->code) {
4754 getaddr->next = cfg->bb_entry->code;
4755 cfg->bb_entry->code = getaddr;
4758 MONO_ADD_INS (cfg->bb_entry, getaddr);
4760 cfg->got_var_allocated = TRUE;
4763 * Add a dummy use to keep the got_var alive, since real uses might
4764 * only be generated by the back ends.
4765 * Add it to end_bblock, so the variable's lifetime covers the whole
4767 * It would be better to make the usage of the got var explicit in all
4768 * cases when the backend needs it (i.e. calls, throw etc.), so this
4769 * wouldn't be needed.
4771 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4772 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size eligible for inlining; initialized lazily from the
 * MONO_INLINELIMIT environment variable (default INLINE_LENGTH_LIMIT). */
4775 static int inline_limit;
4776 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Checks (in order): explicit disables, generic sharing, inline depth,
 * method attributes, IL size against inline_limit, class cctor/initialization
 * constraints, declarative security, and soft-float restrictions.
 */
4779 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4781 MonoMethodHeaderSummary header;
4783 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4784 MonoMethodSignature *sig = mono_method_signature (method);
4788 if (cfg->disable_inline)
4790 if (cfg->generic_sharing_context)
/* Bound recursion of inlined-into-inlined bodies. */
4793 if (cfg->inline_depth > 10)
4796 #ifdef MONO_ARCH_HAVE_LMF_OPS
4797 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4798 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4799 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4804 if (!mono_method_get_header_summary (method, &header))
4807 /*runtime, icall and pinvoke are checked by summary call*/
4808 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4809 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4810 (mono_class_is_marshalbyref (method->klass)) ||
4814 /* also consider num_locals? */
4815 /* Do the size check early to avoid creating vtables */
4816 if (!inline_limit_inited) {
4817 if (g_getenv ("MONO_INLINELIMIT"))
4818 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4820 inline_limit = INLINE_LENGTH_LIMIT;
4821 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
4823 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4827 * if we can initialize the class of the method right away, we do,
4828 * otherwise we don't allow inlining if the class needs initialization,
4829 * since it would mean inserting a call to mono_runtime_class_init()
4830 * inside the inlined code
4832 if (!(cfg->opt & MONO_OPT_SHARED)) {
4833 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4834 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4835 vtable = mono_class_vtable (cfg->domain, method->klass);
4838 if (!cfg->compile_aot)
4839 mono_runtime_class_init (vtable);
4840 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4841 if (cfg->run_cctors && method->klass->has_cctor) {
4842 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4843 if (!method->klass->runtime_info)
4844 /* No vtable created yet */
4846 vtable = mono_class_vtable (cfg->domain, method->klass);
4849 /* This makes so that inline cannot trigger */
4850 /* .cctors: too many apps depend on them */
4851 /* running with a specific order... */
4852 if (! vtable->initialized)
4854 mono_runtime_class_init (vtable);
4856 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4857 if (!method->klass->runtime_info)
4858 /* No vtable created yet */
4860 vtable = mono_class_vtable (cfg->domain, method->klass);
4863 if (!vtable->initialized)
4868 * If we're compiling for shared code
4869 * the cctor will need to be run at aot method load time, for example,
4870 * or at the end of the compilation of the inlining method.
4872 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4877 * CAS - do not inline methods with declarative security
4878 * Note: this has to be before any possible return TRUE;
4880 if (mono_security_method_has_declsec (method))
4883 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: reject methods that take or return R4 values. */
4884 if (mono_arch_is_soft_float ()) {
4886 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4888 for (i = 0; i < sig->param_count; ++i)
4889 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4894 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access on KLASS from METHOD requires an
 * explicit class-initialization check to be emitted.
 */
4901 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* JIT case: if the vtable is already initialized no check is needed.
 * Under AOT initialization state at compile time proves nothing. */
4903 if (!cfg->compile_aot) {
4905 if (vtable->initialized)
4909 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4910 if (cfg->method == method)
4914 if (!mono_class_needs_cctor_run (klass, method))
4917 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4918 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional
 * array of element type KLASS; returns the address instruction (STACK_MP).
 * BCHECK requests an array-bounds check.  Uses an x86 LEA fast path for
 * power-of-two element sizes; gsharedvt element sizes come from the rgctx.
 */
4925 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4929 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4932 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4935 mono_class_init (klass);
4936 size = mono_class_array_element_size (klass);
4939 mult_reg = alloc_preg (cfg);
4940 array_reg = arr->dreg;
4941 index_reg = index->dreg;
4943 #if SIZEOF_REGISTER == 8
4944 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend the index. */
4945 if (COMPILE_LLVM (cfg)) {
4947 index2_reg = index_reg;
4949 index2_reg = alloc_preg (cfg);
4950 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4953 if (index->type == STACK_I8) {
4954 index2_reg = alloc_preg (cfg);
4955 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4957 index2_reg = index_reg;
4962 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4964 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and vector offset into a single LEA. */
4965 if (size == 1 || size == 2 || size == 4 || size == 8) {
4966 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4968 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4969 ins->klass = mono_class_get_element_class (klass);
4970 ins->type = STACK_MP;
4976 add_reg = alloc_ireg_mp (cfg);
4979 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime via the rgctx. */
4982 g_assert (cfg->generic_sharing_context);
4983 context_used = mini_class_check_context_used (cfg, klass);
4984 g_assert (context_used);
4985 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4986 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
4990 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4991 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4992 ins->klass = mono_class_get_element_class (klass);
4993 ins->type = STACK_MP;
4994 MONO_ADD_INS (cfg->cbb, ins);
4999 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address of a two-dimensional array with
 * explicit bounds: both indices are range-checked against the per-dimension
 * MonoArrayBounds, then the flattened offset is
 *   ((idx1 - lo1) * len2 + (idx2 - lo2)) * size + offsetof (vector).
 * Only compiled where the target has native multiply (no MUL emulation).
 */
5001 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5003 int bounds_reg = alloc_preg (cfg);
5004 int add_reg = alloc_ireg_mp (cfg);
5005 int mult_reg = alloc_preg (cfg);
5006 int mult2_reg = alloc_preg (cfg);
5007 int low1_reg = alloc_preg (cfg);
5008 int low2_reg = alloc_preg (cfg);
5009 int high1_reg = alloc_preg (cfg);
5010 int high2_reg = alloc_preg (cfg);
5011 int realidx1_reg = alloc_preg (cfg);
5012 int realidx2_reg = alloc_preg (cfg);
5013 int sum_reg = alloc_preg (cfg);
5014 int index1, index2, tmpreg;
5018 mono_class_init (klass);
5019 size = mono_class_array_element_size (klass);
5021 index1 = index_ins1->dreg;
5022 index2 = index_ins2->dreg;
5024 #if SIZEOF_REGISTER == 8
5025 /* The array reg is 64 bits but the index reg is only 32 */
5026 if (COMPILE_LLVM (cfg)) {
5029 tmpreg = alloc_preg (cfg);
5030 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5032 tmpreg = alloc_preg (cfg);
5033 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5037 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5041 /* range checking */
5042 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5043 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: rebase the index on the lower bound, compare to length. */
5045 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5046 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5047 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5048 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
/* Unsigned compare also catches negative rebased indices. */
5050 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5051 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same, one MonoArrayBounds further into the bounds array. */
5053 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5054 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5055 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5056 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5057 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5058 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5059 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten to a linear element index, scale, and add the vector offset. */
5061 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5062 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5063 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5064 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5065 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5067 ins->type = STACK_MP;
5069 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set accessor
 * CMETHOD.  Rank 1 (and rank 2 where native multiply is available) are
 * inlined; higher ranks fall back to a marshalling helper method call.
 * IS_SET excludes the trailing value argument from the rank count.
 */
5076 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5080 MonoMethod *addr_method;
5083 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5086 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5088 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5089 /* emit_ldelema_2 depends on OP_LMUL */
5090 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5091 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address helper for this rank/size. */
5095 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5096 addr_method = mono_marshal_get_array_address (rank, element_size);
5097 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5102 static MonoBreakPolicy
5103 always_insert_breakpoint (MonoMethod *method)
5105 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5108 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5111 * mono_set_break_policy:
5112 * policy_callback: the new callback function
5114 * Allow embedders to decide whether to actually obey breakpoint instructions
5115 * (both break IL instructions and Debugger.Break () method calls), for example
5116 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5117 * untrusted or semi-trusted code.
5119 * @policy_callback will be called every time a break point instruction needs to
5120 * be inserted with the method argument being the method that calls Debugger.Break()
5121 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5122 * if it wants the breakpoint to not be effective in the given method.
5123 * #MONO_BREAK_POLICY_ALWAYS is the default.
5126 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-break policy. */
5128 if (policy_callback)
5129 break_policy_func = policy_callback;
5131 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical name)
 *
 *   Consult the installed break policy callback for METHOD and translate the
 * result into a boolean "emit the breakpoint" decision.
 */
5135 should_insert_brekpoint (MonoMethod *method) {
5136 switch (break_policy_func (method)) {
5137 case MONO_BREAK_POLICY_ALWAYS:
5139 case MONO_BREAK_POLICY_NEVER:
5141 case MONO_BREAK_POLICY_ON_DBG:
/* The mdb-specific policy is obsolete; warn and fall through to a default. */
5142 g_warning ("mdb no longer supported");
5145 g_warning ("Incorrect value returned from break policy callback");
5150 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address and
 * copy the value between the array slot and the by-ref argument (args [2]).
 * IS_SET selects store-into-array; reference stores get a write barrier.
 */
5152 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5154 MonoInst *addr, *store, *load;
5155 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5157 /* the bounds check is already done by the callers */
5158 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5160 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5161 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5162 if (mini_type_is_reference (cfg, fsig->params [2]))
5163 emit_write_barrier (cfg, addr, load);
5165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5166 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type in the current (possibly
 * generic-shared) compilation context. */
5173 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5175 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into SP [0] [SP [1]].  Reference-element
 * stores with SAFETY_CHECKS go through the virtual stelemref helper (which
 * performs the array covariance check); value types are stored directly,
 * with a constant-index fast path and a gsharedvt variable-size path.
 */
5179 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-NULL reference needs the covariance-checking helper. */
5181 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5182 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5183 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5184 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5185 MonoInst *iargs [3];
5188 mono_class_setup_vtable (obj_array);
5189 g_assert (helper->slot);
5191 if (sp [0]->type != STACK_OBJ)
5193 if (sp [2]->type != STACK_OBJ)
5200 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use address + STOREV. */
5204 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5207 // FIXME-VT: OP_ICONST optimization
5208 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5209 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5210 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset, keeping only the bounds check. */
5211 } else if (sp [1]->opcode == OP_ICONST) {
5212 int array_reg = sp [0]->dreg;
5213 int index_reg = sp [1]->dreg;
5214 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5217 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5218 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5220 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5221 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5222 if (generic_class_is_reference_type (cfg, klass))
5223 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: element access with no bounds or
 * covariance checks.  The element type comes from the store-value parameter
 * for stores and from the return type for loads.
 */
5230 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5235 eklass = mono_class_from_mono_type (fsig->params [2]);
5237 eklass = mono_class_from_mono_type (fsig->ret);
5240 return emit_array_store (cfg, eklass, args, FALSE);
5242 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5249 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5253 //Only allow for valuetypes
5254 if (!param_klass->valuetype || !return_klass->valuetype)
5258 if (param_klass->has_references || return_klass->has_references)
5261 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5262 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5263 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5266 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5267 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5270 //And have the same size
5271 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the parameter and return types (or their
 * element types, for rank-1 arrays) are bit-compatible valuetypes; the move
 * is then a no-op reinterpretation.  Returns NULL when not applicable.
 */
5277 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5279 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5280 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5282 //Valuetypes that are semantically equivalent
5283 if (is_unsafe_mov_compatible (param_klass, return_klass))
5286 //Arrays of valuetypes that are semantically equivalent
5287 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction:
 * SIMD intrinsics first (when enabled), then native-type intrinsics.
 * Returns NULL when no intrinsic applies.
 */
5294 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5296 #ifdef MONO_ARCH_SIMD_INTRINSICS
5297 MonoInst *ins = NULL;
5299 if (cfg->opt & MONO_OPT_SIMD) {
5300 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5306 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block and return it.
 */
5310 emit_memory_barrier (MonoCompile *cfg, int kind)
5312 MonoInst *ins = NULL;
5313 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5314 MONO_ADD_INS (cfg->cbb, ins);
5315 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Method-to-instruction intrinsics used only by the LLVM backend:
 * Math.Sin/Cos/Sqrt/Abs(double) map to single R8 opcodes, and Math.Min/Max
 * on I4/U4/I8/U8 map to conditional-move opcodes when MONO_OPT_CMOV is on.
 * Returns NULL when no mapping applies.
 */
5321 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5323 MonoInst *ins = NULL;
5326 /* The LLVM backend supports these intrinsics */
5327 if (cmethod->klass == mono_defaults.math_class) {
5328 if (strcmp (cmethod->name, "Sin") == 0) {
5330 } else if (strcmp (cmethod->name, "Cos") == 0) {
5332 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5334 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one freg in, one freg out. */
5339 MONO_INST_NEW (cfg, ins, opcode);
5340 ins->type = STACK_R8;
5341 ins->dreg = mono_alloc_freg (cfg);
5342 ins->sreg1 = args [0]->dreg;
5343 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max become cmov-style opcodes; select signed/unsigned, 32/64 bit. */
5347 if (cfg->opt & MONO_OPT_CMOV) {
5348 if (strcmp (cmethod->name, "Min") == 0) {
5349 if (fsig->params [0]->type == MONO_TYPE_I4)
5351 if (fsig->params [0]->type == MONO_TYPE_U4)
5352 opcode = OP_IMIN_UN;
5353 else if (fsig->params [0]->type == MONO_TYPE_I8)
5355 else if (fsig->params [0]->type == MONO_TYPE_U8)
5356 opcode = OP_LMIN_UN;
5357 } else if (strcmp (cmethod->name, "Max") == 0) {
5358 if (fsig->params [0]->type == MONO_TYPE_I4)
5360 if (fsig->params [0]->type == MONO_TYPE_U4)
5361 opcode = OP_IMAX_UN;
5362 else if (fsig->params [0]->type == MONO_TYPE_I8)
5364 else if (fsig->params [0]->type == MONO_TYPE_U8)
5365 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: two sregs, result width follows the operands. */
5370 MONO_INST_NEW (cfg, ins, opcode);
5371 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5372 ins->dreg = mono_alloc_ireg (cfg);
5373 ins->sreg1 = args [0]->dreg;
5374 ins->sreg2 = args [1]->dreg;
5375 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe even under generic sharing: the internal
 * System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers.  Returns NULL for
 * anything else.
 */
5383 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5385 if (cmethod->klass == mono_defaults.array_class) {
5386 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5387 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5388 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5389 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5390 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5391 return emit_array_unsafe_mov (cfg, fsig, args);
5398 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5400 MonoInst *ins = NULL;
5402 static MonoClass *runtime_helpers_class = NULL;
5403 if (! runtime_helpers_class)
5404 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5405 "System.Runtime.CompilerServices", "RuntimeHelpers");
5407 if (cmethod->klass == mono_defaults.string_class) {
5408 if (strcmp (cmethod->name, "get_Chars") == 0) {
5409 int dreg = alloc_ireg (cfg);
5410 int index_reg = alloc_preg (cfg);
5411 int mult_reg = alloc_preg (cfg);
5412 int add_reg = alloc_preg (cfg);
5414 #if SIZEOF_REGISTER == 8
5415 /* The array reg is 64 bits but the index reg is only 32 */
5416 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5418 index_reg = args [1]->dreg;
5420 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5422 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5423 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5424 add_reg = ins->dreg;
5425 /* Avoid a warning */
5427 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5432 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5433 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5435 type_from_op (ins, NULL, NULL);
5437 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5438 int dreg = alloc_ireg (cfg);
5439 /* Decompose later to allow more optimizations */
5440 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5441 ins->type = STACK_I4;
5442 ins->flags |= MONO_INST_FAULT;
5443 cfg->cbb->has_array_access = TRUE;
5444 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5447 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5448 int mult_reg = alloc_preg (cfg);
5449 int add_reg = alloc_preg (cfg);
5451 /* The corlib functions check for oob already. */
5452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5453 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5454 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5455 return cfg->cbb->last_ins;
5458 } else if (cmethod->klass == mono_defaults.object_class) {
5460 if (strcmp (cmethod->name, "GetType") == 0) {
5461 int dreg = alloc_ireg_ref (cfg);
5462 int vt_reg = alloc_preg (cfg);
5463 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5464 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5465 type_from_op (ins, NULL, NULL);
5468 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5469 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5470 int dreg = alloc_ireg (cfg);
5471 int t1 = alloc_ireg (cfg);
5473 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5474 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5475 ins->type = STACK_I4;
5479 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5480 MONO_INST_NEW (cfg, ins, OP_NOP);
5481 MONO_ADD_INS (cfg->cbb, ins);
5485 } else if (cmethod->klass == mono_defaults.array_class) {
5486 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5487 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5489 #ifndef MONO_BIG_ARRAYS
5491 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5494 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5495 int dreg = alloc_ireg (cfg);
5496 int bounds_reg = alloc_ireg_mp (cfg);
5497 MonoBasicBlock *end_bb, *szarray_bb;
5498 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5500 NEW_BBLOCK (cfg, end_bb);
5501 NEW_BBLOCK (cfg, szarray_bb);
5503 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5504 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5507 /* Non-szarray case */
5509 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5510 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5512 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5513 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5515 MONO_START_BB (cfg, szarray_bb);
5518 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5519 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5521 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5522 MONO_START_BB (cfg, end_bb);
5524 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5525 ins->type = STACK_I4;
5531 if (cmethod->name [0] != 'g')
5534 if (strcmp (cmethod->name, "get_Rank") == 0) {
5535 int dreg = alloc_ireg (cfg);
5536 int vtable_reg = alloc_preg (cfg);
5537 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5538 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5539 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5540 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5541 type_from_op (ins, NULL, NULL);
5544 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5545 int dreg = alloc_ireg (cfg);
5547 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5548 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5549 type_from_op (ins, NULL, NULL);
5554 } else if (cmethod->klass == runtime_helpers_class) {
5556 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5557 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5561 } else if (cmethod->klass == mono_defaults.thread_class) {
5562 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5563 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5564 MONO_ADD_INS (cfg->cbb, ins);
5566 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5567 return emit_memory_barrier (cfg, FullBarrier);
5569 } else if (cmethod->klass == mono_defaults.monitor_class) {
5571 /* FIXME this should be integrated to the check below once we support the trampoline version */
5572 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5573 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5574 MonoMethod *fast_method = NULL;
5576 /* Avoid infinite recursion */
5577 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5580 fast_method = mono_monitor_get_fast_path (cmethod);
5584 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5588 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5589 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5592 if (COMPILE_LLVM (cfg)) {
5594 * Pass the argument normally, the LLVM backend will handle the
5595 * calling convention problems.
5597 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5599 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5600 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5601 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5602 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5605 return (MonoInst*)call;
5606 } else if (strcmp (cmethod->name, "Exit") == 0) {
5609 if (COMPILE_LLVM (cfg)) {
5610 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5612 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5613 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5614 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5615 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5618 return (MonoInst*)call;
5620 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5622 MonoMethod *fast_method = NULL;
5624 /* Avoid infinite recursion */
5625 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5626 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5627 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5630 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5631 strcmp (cmethod->name, "Exit") == 0)
5632 fast_method = mono_monitor_get_fast_path (cmethod);
5636 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5639 } else if (cmethod->klass->image == mono_defaults.corlib &&
5640 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5641 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5644 #if SIZEOF_REGISTER == 8
5645 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5648 emit_memory_barrier (cfg, FullBarrier);
5650 /* 64 bit reads are already atomic */
5651 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5652 load_ins->dreg = mono_alloc_preg (cfg);
5653 load_ins->inst_basereg = args [0]->dreg;
5654 load_ins->inst_offset = 0;
5655 MONO_ADD_INS (cfg->cbb, load_ins);
5657 emit_memory_barrier (cfg, FullBarrier);
5663 if (strcmp (cmethod->name, "Increment") == 0) {
5664 MonoInst *ins_iconst;
5667 if (fsig->params [0]->type == MONO_TYPE_I4) {
5668 opcode = OP_ATOMIC_ADD_I4;
5669 cfg->has_atomic_add_i4 = TRUE;
5671 #if SIZEOF_REGISTER == 8
5672 else if (fsig->params [0]->type == MONO_TYPE_I8)
5673 opcode = OP_ATOMIC_ADD_I8;
5676 if (!mono_arch_opcode_supported (opcode))
5678 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5679 ins_iconst->inst_c0 = 1;
5680 ins_iconst->dreg = mono_alloc_ireg (cfg);
5681 MONO_ADD_INS (cfg->cbb, ins_iconst);
5683 MONO_INST_NEW (cfg, ins, opcode);
5684 ins->dreg = mono_alloc_ireg (cfg);
5685 ins->inst_basereg = args [0]->dreg;
5686 ins->inst_offset = 0;
5687 ins->sreg2 = ins_iconst->dreg;
5688 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5689 MONO_ADD_INS (cfg->cbb, ins);
5691 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5692 MonoInst *ins_iconst;
5695 if (fsig->params [0]->type == MONO_TYPE_I4) {
5696 opcode = OP_ATOMIC_ADD_I4;
5697 cfg->has_atomic_add_i4 = TRUE;
5699 #if SIZEOF_REGISTER == 8
5700 else if (fsig->params [0]->type == MONO_TYPE_I8)
5701 opcode = OP_ATOMIC_ADD_I8;
5704 if (!mono_arch_opcode_supported (opcode))
5706 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5707 ins_iconst->inst_c0 = -1;
5708 ins_iconst->dreg = mono_alloc_ireg (cfg);
5709 MONO_ADD_INS (cfg->cbb, ins_iconst);
5711 MONO_INST_NEW (cfg, ins, opcode);
5712 ins->dreg = mono_alloc_ireg (cfg);
5713 ins->inst_basereg = args [0]->dreg;
5714 ins->inst_offset = 0;
5715 ins->sreg2 = ins_iconst->dreg;
5716 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5717 MONO_ADD_INS (cfg->cbb, ins);
5719 } else if (strcmp (cmethod->name, "Add") == 0) {
5722 if (fsig->params [0]->type == MONO_TYPE_I4) {
5723 opcode = OP_ATOMIC_ADD_I4;
5724 cfg->has_atomic_add_i4 = TRUE;
5726 #if SIZEOF_REGISTER == 8
5727 else if (fsig->params [0]->type == MONO_TYPE_I8)
5728 opcode = OP_ATOMIC_ADD_I8;
5731 if (!mono_arch_opcode_supported (opcode))
5733 MONO_INST_NEW (cfg, ins, opcode);
5734 ins->dreg = mono_alloc_ireg (cfg);
5735 ins->inst_basereg = args [0]->dreg;
5736 ins->inst_offset = 0;
5737 ins->sreg2 = args [1]->dreg;
5738 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5739 MONO_ADD_INS (cfg->cbb, ins);
5743 if (strcmp (cmethod->name, "Exchange") == 0) {
5745 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5747 if (fsig->params [0]->type == MONO_TYPE_I4) {
5748 opcode = OP_ATOMIC_EXCHANGE_I4;
5749 cfg->has_atomic_exchange_i4 = TRUE;
5751 #if SIZEOF_REGISTER == 8
5752 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5753 (fsig->params [0]->type == MONO_TYPE_I))
5754 opcode = OP_ATOMIC_EXCHANGE_I8;
5756 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5757 opcode = OP_ATOMIC_EXCHANGE_I4;
5758 cfg->has_atomic_exchange_i4 = TRUE;
5764 if (!mono_arch_opcode_supported (opcode))
5767 MONO_INST_NEW (cfg, ins, opcode);
5768 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5769 ins->inst_basereg = args [0]->dreg;
5770 ins->inst_offset = 0;
5771 ins->sreg2 = args [1]->dreg;
5772 MONO_ADD_INS (cfg->cbb, ins);
5774 switch (fsig->params [0]->type) {
5776 ins->type = STACK_I4;
5780 ins->type = STACK_I8;
5782 case MONO_TYPE_OBJECT:
5783 ins->type = STACK_OBJ;
5786 g_assert_not_reached ();
5789 if (cfg->gen_write_barriers && is_ref)
5790 emit_write_barrier (cfg, args [0], args [1]);
5793 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5795 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5796 if (fsig->params [1]->type == MONO_TYPE_I4)
5798 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5799 size = sizeof (gpointer);
5800 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5803 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5805 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5806 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5807 ins->sreg1 = args [0]->dreg;
5808 ins->sreg2 = args [1]->dreg;
5809 ins->sreg3 = args [2]->dreg;
5810 ins->type = STACK_I4;
5811 MONO_ADD_INS (cfg->cbb, ins);
5812 cfg->has_atomic_cas_i4 = TRUE;
5813 } else if (size == 8) {
5814 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5816 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5817 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5818 ins->sreg1 = args [0]->dreg;
5819 ins->sreg2 = args [1]->dreg;
5820 ins->sreg3 = args [2]->dreg;
5821 ins->type = STACK_I8;
5822 MONO_ADD_INS (cfg->cbb, ins);
5824 /* g_assert_not_reached (); */
5826 if (cfg->gen_write_barriers && is_ref)
5827 emit_write_barrier (cfg, args [0], args [1]);
5830 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5831 ins = emit_memory_barrier (cfg, FullBarrier);
5835 } else if (cmethod->klass->image == mono_defaults.corlib) {
5836 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5837 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5838 if (should_insert_brekpoint (cfg->method)) {
5839 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5841 MONO_INST_NEW (cfg, ins, OP_NOP);
5842 MONO_ADD_INS (cfg->cbb, ins);
5846 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5847 && strcmp (cmethod->klass->name, "Environment") == 0) {
5849 EMIT_NEW_ICONST (cfg, ins, 1);
5851 EMIT_NEW_ICONST (cfg, ins, 0);
5855 } else if (cmethod->klass == mono_defaults.math_class) {
5857 * There is general branches code for Min/Max, but it does not work for
5859 * http://everything2.com/?node_id=1051618
5861 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5862 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5864 MonoJumpInfoToken *ji;
5867 cfg->disable_llvm = TRUE;
5869 if (args [0]->opcode == OP_GOT_ENTRY) {
5870 pi = args [0]->inst_p1;
5871 g_assert (pi->opcode == OP_PATCH_INFO);
5872 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5875 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5876 ji = args [0]->inst_p0;
5879 NULLIFY_INS (args [0]);
5882 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5883 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5884 ins->dreg = mono_alloc_ireg (cfg);
5886 ins->inst_p0 = mono_string_to_utf8 (s);
5887 MONO_ADD_INS (cfg->cbb, ins);
5892 #ifdef MONO_ARCH_SIMD_INTRINSICS
5893 if (cfg->opt & MONO_OPT_SIMD) {
5894 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5900 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5904 if (COMPILE_LLVM (cfg)) {
5905 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5910 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5914 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a faster JIT-internal implementation where one
 * exists.  Currently handles managed string allocation: String.InternalAllocateStr
 * is rewritten into a direct call to the GC's managed allocator.  The redirect is
 * skipped when allocation profiling is on or MONO_OPT_SHARED is set, since both
 * need the normal allocation path.
 * NOTE(review): this extract is missing some original lines (including the
 * fall-through return), so only the visible behavior is documented here.
 */
5917 inline static MonoInst*
5918 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5919 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5921 if (method->klass == mono_defaults.string_class) {
5922 /* managed string allocation support */
5923 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5924 MonoInst *iargs [2];
5925 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5926 MonoMethod *managed_alloc = NULL;
5928 g_assert (vtable); /*Should not fail since it System.String*/
5929 #ifndef MONO_CROSS_COMPILE
/* Cross-compilation can't query the host GC for a managed allocator. */
5930 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* args for the managed allocator: vtable + requested string length */
5934 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5935 iargs [1] = args [0];
5936 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments SP of an inlined call into freshly created local
 * variables, setting cfg->args [i] so the inlined body can reference them as
 * its own arguments.  The implicit 'this' (when sig->hasthis) is typed from
 * the evaluation stack rather than from the signature.
 */
5943 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5945 MonoInst *store, *temp;
5948 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5949 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5952 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5953 * would be different than the MonoInst's used to represent arguments, and
5954 * the ldelema implementation can't deal with that.
5955 * Solution: When ldelema is used on an inline argument, create a var for
5956 * it, emit ldelema on that var, and emit the saving code below in
5957 * inline_method () if needed.
5959 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5960 cfg->args [i] = temp;
5961 /* This uses cfg->args [i] which is set by the preceding line */
5962 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5963 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids for narrowing down inlining problems: when enabled, inlining
 * is restricted to callees/callers whose full name matches a prefix taken from
 * an environment variable (see below).
 */
5968 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5969 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5971 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE only if CALLED_METHOD's full name starts with the prefix in
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The lookup
 * result is cached in a function-static, so the variable is read once per run.
 */
5973 check_inline_called_method_name_limit (MonoMethod *called_method)
5976 static const char *limit = NULL;
5978 if (limit == NULL) {
5979 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5981 if (limit_string != NULL)
5982 limit = limit_string;
/* Empty limit string means "no restriction" (handled in missing lines). */
5987 if (limit [0] != '\0') {
5988 char *called_method_name = mono_method_full_name (called_method, TRUE);
5990 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5991 g_free (called_method_name);
5993 //return (strncmp_result <= 0);
5994 return (strncmp_result == 0);
6001 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Same idea as the "called" variant above, but filters on the CALLER's full
 * name using the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
6003 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6006 static const char *limit = NULL;
6008 if (limit == NULL) {
6009 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6010 if (limit_string != NULL) {
6011 limit = limit_string;
6017 if (limit [0] != '\0') {
6018 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6020 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6021 g_free (caller_method_name);
6023 //return (strncmp_result <= 0);
6024 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes vreg DREG to the zero value of RTYPE:
 * NULL for reference/pointer types, 0 for integer types, 0.0 for R4/R8,
 * and VZERO for value types (including generic instances and type variables
 * that are known to be valuetypes).
 */
6032 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6034 static double r8_0 = 0.0;
6038 rtype = mini_replace_type (rtype);
6042 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6043 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6044 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6045 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6046 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6047 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
/* R8CONST takes a pointer to the constant; point at the static 0.0 */
6048 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6049 ins->type = STACK_R8;
6050 ins->inst_p0 = (void*)&r8_0;
6052 MONO_ADD_INS (cfg->cbb, ins);
6053 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6054 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6055 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6056 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6057 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* default: treat as a reference/pointer and store NULL */
6059 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* opcodes, which define DREG
 * for the SSA/liveness passes without generating any real machine code.
 * Falls back to a real initialization for types with no dummy opcode.
 */
6064 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6068 rtype = mini_replace_type (rtype);
6072 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6073 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6074 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6075 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6076 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6077 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6078 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6079 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6080 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6081 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6082 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6083 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6085 emit_init_rvar (cfg, dreg, rtype);
6089 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of TYPE.  Under soft-float the value is built in
 * a fresh vreg and then stored, so the store goes through the soft-float
 * decomposition; otherwise the local's own dreg is initialized directly.
 */
6091 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6093 MonoInst *var = cfg->locals [local];
6094 if (COMPILE_SOFT_FLOAT (cfg)) {
6096 int reg = alloc_dreg (cfg, var->type);
6097 emit_init_rvar (cfg, reg, type);
6098 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6101 emit_init_rvar (cfg, var->dreg, type);
6103 emit_dummy_init_rvar (cfg, var->dreg, type);
6110 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD into the IR at IP by recursively invoking
 * mono_method_to_ir () between a fresh start bblock (sbblock) and end bblock
 * (ebblock).  Returns the inlining cost (>= 0) on success, or a failure
 * indicator otherwise (exact value is in lines missing from this extract).
 * The per-method compilation state in CFG (locals, args, cil offsets, cbb,
 * generic context, ...) is saved before and restored after the recursive
 * conversion.  On success, the new bblocks are linked/merged into the CFG and
 * *OUT_CBB receives the bblock where code emission should continue; on
 * failure the added bblocks are discarded by resetting cfg->cbb.
 */
6113 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6114 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6116 MonoInst *ins, *rvar = NULL;
6117 MonoMethodHeader *cheader;
6118 MonoBasicBlock *ebblock, *sbblock;
6120 MonoMethod *prev_inlined_method;
6121 MonoInst **prev_locals, **prev_args;
6122 MonoType **prev_arg_types;
6123 guint prev_real_offset;
6124 GHashTable *prev_cbb_hash;
6125 MonoBasicBlock **prev_cil_offset_to_bb;
6126 MonoBasicBlock *prev_cbb;
6127 unsigned char* prev_cil_start;
6128 guint32 prev_cil_offset_to_bb_len;
6129 MonoMethod *prev_current_method;
6130 MonoGenericContext *prev_generic_context;
6131 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6133 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging inlining decisions (see above). */
6135 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6136 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6139 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6140 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6144 if (cfg->verbose_level > 2)
6145 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each inlineable method only once */
6147 if (!cmethod->inline_info) {
6148 cfg->stat_inlineable_methods++;
6149 cmethod->inline_info = 1;
6152 /* allocate local variables */
6153 cheader = mono_method_get_header (cmethod);
6155 if (cheader == NULL || mono_loader_get_last_error ()) {
6156 MonoLoaderError *error = mono_loader_get_last_error ();
6159 mono_metadata_free_mh (cheader);
6160 if (inline_always && error)
6161 mono_cfg_set_exception (cfg, error->exception_type);
6163 mono_loader_clear_error ();
6167 /* Must verify before creating locals as it can cause the JIT to assert. */
6168 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6169 mono_metadata_free_mh (cheader);
6173 /* allocate space to store the return value */
6174 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6175 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
6178 prev_locals = cfg->locals;
6179 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6180 for (i = 0; i < cheader->num_locals; ++i)
6181 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6183 /* allocate start and end blocks */
6184 /* This is needed so if the inline is aborted, we can clean up */
6185 NEW_BBLOCK (cfg, sbblock);
6186 sbblock->real_offset = real_offset;
6188 NEW_BBLOCK (cfg, ebblock);
6189 ebblock->block_num = cfg->num_bblocks++;
6190 ebblock->real_offset = real_offset;
/* Save all per-method conversion state before the recursive call */
6192 prev_args = cfg->args;
6193 prev_arg_types = cfg->arg_types;
6194 prev_inlined_method = cfg->inlined_method;
6195 cfg->inlined_method = cmethod;
6196 cfg->ret_var_set = FALSE;
6197 cfg->inline_depth ++;
6198 prev_real_offset = cfg->real_offset;
6199 prev_cbb_hash = cfg->cbb_hash;
6200 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6201 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6202 prev_cil_start = cfg->cil_start;
6203 prev_cbb = cfg->cbb;
6204 prev_current_method = cfg->current_method;
6205 prev_generic_context = cfg->generic_context;
6206 prev_ret_var_set = cfg->ret_var_set;
6207 prev_disable_inline = cfg->disable_inline;
/* A non-static callvirt needs an explicit null check on 'this' */
6209 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6212 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6214 ret_var_set = cfg->ret_var_set;
/* Restore the caller's conversion state */
6216 cfg->inlined_method = prev_inlined_method;
6217 cfg->real_offset = prev_real_offset;
6218 cfg->cbb_hash = prev_cbb_hash;
6219 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6220 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6221 cfg->cil_start = prev_cil_start;
6222 cfg->locals = prev_locals;
6223 cfg->args = prev_args;
6224 cfg->arg_types = prev_arg_types;
6225 cfg->current_method = prev_current_method;
6226 cfg->generic_context = prev_generic_context;
6227 cfg->ret_var_set = prev_ret_var_set;
6228 cfg->disable_inline = prev_disable_inline;
6229 cfg->inline_depth --;
/* Accept the inline if the body was cheap enough, or if forced */
6231 if ((costs >= 0 && costs < 60) || inline_always) {
6232 if (cfg->verbose_level > 2)
6233 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6235 cfg->stat_inlined_methods++;
6237 /* always add some code to avoid block split failures */
6238 MONO_INST_NEW (cfg, ins, OP_NOP);
6239 MONO_ADD_INS (prev_cbb, ins);
6241 prev_cbb->next_bb = sbblock;
6242 link_bblock (cfg, prev_cbb, sbblock);
6245 * Get rid of the begin and end bblocks if possible to aid local
6248 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6250 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6251 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6253 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6254 MonoBasicBlock *prev = ebblock->in_bb [0];
6255 mono_merge_basic_blocks (cfg, prev, ebblock);
6257 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6258 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6259 cfg->cbb = prev_cbb;
6263 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a definition on paths ending in unreachable code */
6269 for (i = 0; i < ebblock->in_count; ++i) {
6270 bb = ebblock->in_bb [i];
6272 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6275 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6283 *out_cbb = cfg->cbb;
6287 * If the inlined method contains only a throw, then the ret var is not
6288 * set, so set it to a dummy value.
6291 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6293 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6296 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard the partially-built bblocks */
6299 if (cfg->verbose_level > 2)
6300 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6301 cfg->exception_type = MONO_EXCEPTION_NONE;
6302 mono_loader_clear_error ();
6304 /* This gets rid of the newly added bblocks */
6305 cfg->cbb = prev_cbb;
6307 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6312 * Some of these comments may well be out-of-date.
6313 * Design decisions: we do a single pass over the IL code (and we do bblock
6314 * splitting/merging in the few cases when it's required: a back jump to an IL
6315 * address that was not already seen as bblock starting point).
6316 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6317 * Complex operations are decomposed in simpler ones right away. We need to let the
6318 * arch-specific code peek and poke inside this process somehow (except when the
6319 * optimizations can take advantage of the full semantic info of coarse opcodes).
6320 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6321 * MonoInst->opcode initially is the IL opcode or some simplification of that
6322 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6323 * opcode with value bigger than OP_LAST.
6324 * At this point the IR can be handed over to an interpreter, a dumb code generator
6325 * or to the optimizing code generator that will translate it to SSA form.
6327 * Profiling directed optimizations.
6328 * We may compile by default with few or no optimizations and instrument the code
6329 * or the user may indicate what methods to optimize the most either in a config file
6330 * or through repeated runs where the compiler applies offline the optimizations to
6331 * each method and then decides if it was worth it.
/*
 * Verification helpers used by mono_method_to_ir (): each macro validates one
 * property of the IL being converted and jumps to the UNVERIFIED (or type-load
 * error) path on failure.
 */
6334 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6335 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6336 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6337 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6338 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6339 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6340 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6341 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6343 /* offset from br.s -> br like opcodes */
6344 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to bblock BB, i.e. no other
 * bblock starts at that offset in cfg->cil_offset_to_bb.
 */
6347 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6349 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6351 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode's argument
 * kind and create a MonoBasicBlock (via GET_BBLOCK) at every branch target and
 * at the instruction following a branch/switch.  Bblocks whose last visible
 * instruction is a throw are marked out_of_line so they can be moved to cold
 * code.  NOTE(review): several lines (ip advancement per argument kind, error
 * exits) are missing from this extract.
 */
6355 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6357 unsigned char *ip = start;
6358 unsigned char *target;
6361 MonoBasicBlock *bblock;
6362 const MonoOpcode *opcode;
6365 cli_addr = ip - start;
6366 i = mono_opcode_value ((const guint8 **)&ip, end);
6369 opcode = &mono_opcodes [i];
6370 switch (opcode->argument) {
6371 case MonoInlineNone:
6374 case MonoInlineString:
6375 case MonoInlineType:
6376 case MonoInlineField:
6377 case MonoInlineMethod:
6380 case MonoShortInlineR:
6387 case MonoShortInlineVar:
6388 case MonoShortInlineI:
6391 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
6392 target = start + cli_addr + 2 + (signed char)ip [1];
6393 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor also starts a bblock */
6396 GET_BBLOCK (cfg, bblock, ip);
6398 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
6399 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6400 GET_BBLOCK (cfg, bblock, target);
6403 GET_BBLOCK (cfg, bblock, ip);
6405 case MonoInlineSwitch: {
6406 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the jump table */
6409 cli_addr += 5 + 4 * n;
6410 target = start + cli_addr;
6411 GET_BBLOCK (cfg, bblock, target);
6413 for (j = 0; j < n; ++j) {
6414 target = start + cli_addr + (gint32)read32 (ip);
6415 GET_BBLOCK (cfg, bblock, target);
6425 g_assert_not_reached ();
6428 if (i == CEE_THROW) {
6429 unsigned char *bb_start = ip - 1;
6431 /* Find the start of the bblock containing the throw */
6433 while ((bb_start >= start) && !bblock) {
6434 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6438 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 * methods the token indexes the wrapper's data table; otherwise it is a
 * regular metadata token resolved against M's image.  "Open" generic methods
 * are allowed (compare mini_get_method () below, which rejects them).
 */
6448 static inline MonoMethod *
6449 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6453 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6454 method = mono_method_get_wrapper_data (m, token);
6456 method = mono_class_inflate_generic_method (method, context);
6458 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the handling of
 * that case is in lines missing from this extract).
 */
6464 static inline MonoMethod *
6465 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6467 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6469 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass: from the wrapper data table for wrapper
 * methods (inflating with CONTEXT), or from metadata otherwise.  The class is
 * initialized before being returned.
 */
6475 static inline MonoClass*
6476 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6481 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6482 klass = mono_method_get_wrapper_data (method, token);
6484 klass = mono_class_inflate_generic_class (klass, context);
6486 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6487 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6490 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: wrapper data (inflated with
 * CONTEXT) for wrapper methods, or parsed from metadata otherwise.
 */
6494 static inline MonoMethodSignature*
6495 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6497 MonoMethodSignature *fsig;
6499 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6502 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6504 fsig = mono_inflate_generic_signature (fsig, context, &error);
6506 g_assert (mono_error_ok (&error));
6509 fsig = mono_metadata_parse_signature (method->klass->image, token);
6515 * Returns TRUE if the JIT should abort inlining because "callee"
6516 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link-demand security for a call from CALLER to CALLEE.
 * Returns TRUE when the JIT should abort inlining because CALLEE is influenced
 * by security attributes.  For an ECMA link demand, code throwing a
 * SecurityException is emitted before the call; other failures are recorded
 * as a pending linkdemand exception on CFG.
 */
6519 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6523 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6527 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6528 if (result == MONO_JIT_SECURITY_OK)
6531 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6532 /* Generate code to throw a SecurityException before the actual call/link */
6533 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6536 NEW_ICONST (cfg, args [0], 4);
6537 NEW_METHODCONST (cfg, args [1], caller);
6538 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6539 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6540 /* don't hide previous results */
6541 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6542 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return SecurityManager.ThrowException (cached in a function-static after
 * the first lookup).
 */
6550 throw_exception (void)
6552 static MonoMethod *method = NULL;
6555 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6556 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point.
 */
6563 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6565 MonoMethod *thrower = throw_exception ();
6568 EMIT_NEW_PCONST (cfg, args [0], ex);
6569 mono_emit_method_call (cfg, thrower, args, NULL);
6573 * Return the original method if a wrapper is specified. We can only access
6574 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   For CoreCLR security checks: map a wrapper back to the managed method it
 * wraps, since custom attributes only exist on the original.  Non-wrappers
 * and native-to-managed wrappers are returned unchanged (handling of the
 * returned value for the latter is in lines missing from this extract).
 */
6577 get_original_method (MonoMethod *method)
6579 if (method->wrapper_type == MONO_WRAPPER_NONE)
6582 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6583 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6586 /* in other cases we need to find the original method */
6587 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER may not access FIELD, emit code that throws
 * the corresponding security exception at this point.
 */
6591 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6592 MonoBasicBlock *bblock, unsigned char *ip)
6594 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6595 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6597 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that throws
 * the corresponding security exception at this point.
 */
6601 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6602 MonoBasicBlock *bblock, unsigned char *ip)
6604 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6605 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6607 emit_throw_exception (cfg, ex);
6611 * Check that the IL instructions at ip are the array initialization
6612 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence following a newarr —
 * dup; ldtoken <field>; call RuntimeHelpers::InitializeArray — and, when it
 * matches a supported primitive element type, return a pointer to the static
 * field's RVA data (or the RVA itself for AOT) so the array can be filled with
 * a memcpy instead of the InitializeArray call.  *OUT_SIZE and
 * *OUT_FIELD_TOKEN are filled from the matched field.
 */
6615 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6618 * newarr[System.Int32]
6620 * ldtoken field valuetype ...
6621 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6623 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6625 guint32 token = read32 (ip + 7);
6626 guint32 field_token = read32 (ip + 2);
6627 guint32 field_index = field_token & 0xffffff;
6629 const char *data_ptr;
6631 MonoMethod *cmethod;
6632 MonoClass *dummy_class;
6633 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6637 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6641 *out_field_token = field_token;
6643 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* verify the callee really is RuntimeHelpers.InitializeArray from corlib */
6646 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6648 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6649 case MONO_TYPE_BOOLEAN:
6653 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6654 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6655 case MONO_TYPE_CHAR:
/* refuse if the declared data is smaller than the array needs */
6672 if (size > mono_type_size (field->type, &dummy_align))
6675 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6676 if (!image_is_dynamic (method->klass->image)) {
6677 field_index = read32 (ip + 2) & 0xffffff;
6678 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6679 data_ptr = mono_image_rva_map (method->klass->image, rva);
6680 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6681 /* for aot code we do the lookup on load */
6682 if (aot && data_ptr)
6683 return GUINT_TO_POINTER (rva);
6685 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6687 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Mark the compilation as failed with an InvalidProgramException, building
 * a message that names the method and disassembles the offending IL at IP.
 */
6695 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6697 char *method_fname = mono_method_full_name (method, TRUE);
6699 MonoMethodHeader *header = mono_method_get_header (method);
6701 if (header->code_size == 0)
6702 method_code = g_strdup ("method body is empty.");
6704 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6705 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6706 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6707 g_free (method_fname);
6708 g_free (method_code);
/* The header cannot be freed here while it may still be in use; queue it on
 * the cfg's mempool-backed list so it is released with the compile. */
6709 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Fail the compilation with a caller-supplied managed exception object.
 */
6713 set_exception_object (MonoCompile *cfg, MonoException *exception)
6715 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
/* exception is a managed object: register the slot as a GC root before
 * storing the reference so the collector keeps it alive. */
6716 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6717 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the stack (*sp) into local N.
 * When the value is a freshly-emitted constant and the store would be a
 * plain reg-reg move, retarget the constant's dreg instead of emitting
 * a separate move.
 */
6721 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6724 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only safe when sp [0] is literally the last emitted instruction. */
6725 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6726 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6727 /* Optimize reg-reg moves away */
6729 * Can't optimize other opcodes, since sp[0] might point to
6730 * the last ins of a decomposed opcode.
6732 sp [0]->dreg = (cfg)->locals [n]->dreg;
6734 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6739 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes "ldloca <n>; initobj <type>" and replaces the pair with a
 * direct initialization of the local, returning the advanced ip on success
 * (surrounding lines elided in this excerpt). */
6742 static inline unsigned char *
6743 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6753 local = read16 (ip + 2);
/* Look ahead for CEE_PREFIX1 CEE_INITOBJ and make sure the next
 * instruction still belongs to the current basic block. */
6757 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6758 /* From the INITOBJ case */
6759 token = read32 (ip + 2);
6760 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6761 CHECK_TYPELOAD (klass);
6762 type = mini_replace_type (&klass->byval_arg);
6763 emit_init_local (cfg, local, type, TRUE);
/* Returns whether CLASS is (or derives from) System.Exception; walks the
 * parent chain comparing against mono_defaults.exception_class (loop
 * structure partly elided in this excerpt). */
6771 is_exception_class (MonoClass *class)
6774 if (class == mono_defaults.exception_class)
6776 class = class->parent;
6782 * is_jit_optimizer_disabled:
6784 * Determine whenever M's assembly has a DebuggableAttribute with the
6785 * IsJITOptimizerDisabled flag set.
6788 is_jit_optimizer_disabled (MonoMethod *m)
6790 MonoAssembly *ass = m->klass->image->assembly;
6791 MonoCustomAttrInfo* attrs;
/* Cached DebuggableAttribute class, shared across calls. */
6792 static MonoClass *klass;
6794 gboolean val = FALSE;
/* Fast path: result is cached per-assembly. */
6797 if (ass->jit_optimizer_disabled_inited)
6798 return ass->jit_optimizer_disabled;
6801 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the (negative) cached result: write the value first, then a
 * barrier, then the inited flag, so racing readers never see the flag
 * without the value. */
6804 ass->jit_optimizer_disabled = FALSE;
6805 mono_memory_barrier ();
6806 ass->jit_optimizer_disabled_inited = TRUE;
6810 attrs = mono_custom_attrs_from_assembly (ass);
6812 for (i = 0; i < attrs->num_attrs; ++i) {
6813 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6816 MonoMethodSignature *sig;
/* Skip attributes that are not DebuggableAttribute. */
6818 if (!attr->ctor || attr->ctor->klass != klass)
6820 /* Decode the attribute. See reflection.c */
6821 len = attr->data_size;
6822 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6823 g_assert (read16 (p) == 0x0001);
6826 // FIXME: Support named parameters
6827 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is handled here. */
6828 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6830 /* Two boolean arguments */
6834 mono_custom_attrs_free (attrs);
/* Same publish order as above for the computed result. */
6837 ass->jit_optimizer_disabled = val;
6838 mono_memory_barrier ();
6839 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD with signature FSIG can be
 * compiled as a tail call.  Starts from an arch/signature compatibility
 * check, then applies a series of disqualifying conditions.
 */
6845 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6847 gboolean supported_tail_call;
6850 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
/* Let the backend decide when it implements OP_TAIL_CALL. */
6851 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback: signatures must match exactly and the return must not be a
 * struct (would need hidden return-buffer handling). */
6853 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6856 for (i = 0; i < fsig->param_count; ++i) {
6857 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6858 /* These can point to the current method's stack */
6859 supported_tail_call = FALSE;
6861 if (fsig->hasthis && cmethod->klass->valuetype)
6862 /* this might point to the current method's stack */
6863 supported_tail_call = FALSE;
6864 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6865 supported_tail_call = FALSE;
/* A method that saves an LMF must unwind it normally on return. */
6866 if (cfg->method->save_lmf)
6867 supported_tail_call = FALSE;
6868 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6869 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible (no callvirt/calli here). */
6870 if (call_opcode != CEE_CALL)
6871 supported_tail_call = FALSE;
6873 /* Debugging support */
6875 if (supported_tail_call) {
/* mono_debug_count () lets developers bisect tail-call related bugs. */
6876 if (!mono_debug_count ())
6877 supported_tail_call = FALSE;
6881 return supported_tail_call;
6884 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6885 * it to the thread local value based on the tls_offset field. Every other kind of access to
6886 * the field causes an assert.
6889 is_magic_tls_access (MonoClassField *field)
/* Match by field name, declaring class name, and corlib origin. */
6891 if (strcmp (field->name, "tlsdata"))
6893 if (strcmp (field->parent->name, "ThreadLocal`1"))
6895 return field->parent->image == mono_defaults.corlib;
6898 /* emits the code needed to access a managed tls var (like ThreadStatic)
6899 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6900 * pointer for the current thread.
6901 * Returns the MonoInst* representing the address of the tls var.
6904 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6907 int static_data_reg, array_reg, dreg;
6908 int offset2_reg, idx_reg;
6909 // inlined access to the tls data
6910 // idx = (offset >> 24) - 1;
6911 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6912 static_data_reg = alloc_ireg (cfg);
/* static_data_reg = thread->static_data */
6913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
6914 idx_reg = alloc_ireg (cfg);
/* idx = (offset >> 24) - 1; high byte of the offset encodes the slot. */
6915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale idx by pointer size (shift by 3 on 64-bit, 2 on 32-bit). */
6917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6918 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6919 array_reg = alloc_ireg (cfg);
/* array_reg = static_data [idx] */
6920 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6921 offset2_reg = alloc_ireg (cfg);
/* offset2 = offset & 0xffffff; low 24 bits are the byte offset. */
6922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6923 dreg = alloc_ireg (cfg);
6924 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6929 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6930 * this address is cached per-method in cached_tls_addr.
6933 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6935 MonoInst *load, *addr, *temp, *store, *thread_ins;
6936 MonoClassField *offset_field;
/* Reuse the per-method cached address when it was already computed. */
6938 if (*cached_tls_addr) {
6939 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6942 thread_ins = mono_get_thread_intrinsic (cfg);
6943 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load ThreadLocal<T>.tls_offset from the object on the stack. */
6945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6947 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: call the managed helper instead
 * (branch structure partly elided in this excerpt). */
6949 MonoMethod *thread_method;
6950 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6951 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6953 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6954 addr->klass = mono_class_from_mono_type (tls_field->type);
6955 addr->type = STACK_MP;
/* Cache the computed address in a local so later accesses in this method
 * reload it via EMIT_NEW_TEMPLOAD instead of recomputing. */
6956 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6957 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6959 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6966 * Handle calls made to ctors from NEWOBJ opcodes.
6968 * REF_BBLOCK will point to the current bblock after the call.
6971 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6972 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
6974 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
6975 MonoBasicBlock *bblock = *ref_bblock;
/* Shared-generic valuetype ctors need an extra rgctx/vtable argument. */
6977 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6978 mono_method_is_generic_sharable (cmethod, TRUE)) {
6979 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
6980 mono_class_vtable (cfg->domain, cmethod->klass);
6981 CHECK_TYPELOAD (cmethod->klass);
/* Method-inflated case: pass the method rgctx. */
6983 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6984 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Class-shared case: fetch the vtable through the rgctx. */
6987 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
6988 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: the vtable is known at compile time. */
6990 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6992 CHECK_TYPELOAD (cmethod->klass);
6993 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6998 /* Avoid virtual calls to ctors if possible */
6999 if (mono_class_is_marshalbyref (cmethod->klass))
7000 callvirt_this_arg = sp [0];
/* First choice: an intrinsic implementation of the ctor. */
7002 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7003 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7004 CHECK_CFG_EXCEPTION;
/* Second choice: inline the ctor body (never for Exception subclasses). */
7005 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7006 mono_method_check_inlining (cfg, cmethod) &&
7007 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7010 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* Account for the 5-byte call instruction the inline replaced. */
7011 cfg->real_offset += 5;
7013 *inline_costs += costs - 5;
7014 *ref_bblock = bblock;
7016 INLINE_FAILURE ("inline failure");
7017 // FIXME-VT: Clean this up
7018 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7019 GSHAREDVT_FAILURE(*ip);
7020 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the out-trampoline via an indirect call. */
7022 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7025 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7026 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7027 } else if (context_used &&
7028 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7029 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7030 MonoInst *cmethod_addr;
7032 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7034 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7035 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7037 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain direct call to the ctor. */
7039 INLINE_FAILURE ("ctor call");
7040 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7041 callvirt_this_arg, NULL, vtable_arg);
7048 * mono_method_to_ir:
7050 * Translate the .net IL into linear IR.
7053 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7054 MonoInst *return_var, MonoInst **inline_args,
7055 guint inline_offset, gboolean is_virtual_call)
7058 MonoInst *ins, **sp, **stack_start;
7059 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7060 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7061 MonoMethod *cmethod, *method_definition;
7062 MonoInst **arg_array;
7063 MonoMethodHeader *header;
7065 guint32 token, ins_flag;
7067 MonoClass *constrained_call = NULL;
7068 unsigned char *ip, *end, *target, *err_pos;
7069 MonoMethodSignature *sig;
7070 MonoGenericContext *generic_context = NULL;
7071 MonoGenericContainer *generic_container = NULL;
7072 MonoType **param_types;
7073 int i, n, start_new_bblock, dreg;
7074 int num_calls = 0, inline_costs = 0;
7075 int breakpoint_id = 0;
7077 MonoBoolean security, pinvoke;
7078 MonoSecurityManager* secman = NULL;
7079 MonoDeclSecurityActions actions;
7080 GSList *class_inits = NULL;
7081 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7083 gboolean init_locals, seq_points, skip_dead_blocks;
7084 gboolean sym_seq_points = FALSE;
7085 MonoInst *cached_tls_addr = NULL;
7086 MonoDebugMethodInfo *minfo;
7087 MonoBitSet *seq_point_locs = NULL;
7088 MonoBitSet *seq_point_set_locs = NULL;
7090 cfg->disable_inline = is_jit_optimizer_disabled (method);
7092 /* serialization and xdomain stuff may need access to private fields and methods */
7093 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7094 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7095 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7096 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7097 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7098 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7100 dont_verify |= mono_security_smcs_hack_enabled ();
7102 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7103 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7104 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7105 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7106 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7108 image = method->klass->image;
7109 header = mono_method_get_header (method);
7111 MonoLoaderError *error;
7113 if ((error = mono_loader_get_last_error ())) {
7114 mono_cfg_set_exception (cfg, error->exception_type);
7116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7117 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7119 goto exception_exit;
7121 generic_container = mono_method_get_generic_container (method);
7122 sig = mono_method_signature (method);
7123 num_args = sig->hasthis + sig->param_count;
7124 ip = (unsigned char*)header->code;
7125 cfg->cil_start = ip;
7126 end = ip + header->code_size;
7127 cfg->stat_cil_code_size += header->code_size;
7129 seq_points = cfg->gen_seq_points && cfg->method == method;
7130 #ifdef PLATFORM_ANDROID
7131 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7134 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7135 /* We could hit a seq point before attaching to the JIT (#8338) */
7139 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7140 minfo = mono_debug_lookup_method (method);
7142 int i, n_il_offsets;
7146 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7147 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7148 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7149 sym_seq_points = TRUE;
7150 for (i = 0; i < n_il_offsets; ++i) {
7151 if (il_offsets [i] < header->code_size)
7152 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7154 g_free (il_offsets);
7155 g_free (line_numbers);
7160 * Methods without init_locals set could cause asserts in various passes
7161 * (#497220). To work around this, we emit dummy initialization opcodes
7162 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7163 * on some platforms.
7165 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7166 init_locals = header->init_locals;
7170 method_definition = method;
7171 while (method_definition->is_inflated) {
7172 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7173 method_definition = imethod->declaring;
7176 /* SkipVerification is not allowed if core-clr is enabled */
7177 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7179 dont_verify_stloc = TRUE;
7182 if (sig->is_inflated)
7183 generic_context = mono_method_get_context (method);
7184 else if (generic_container)
7185 generic_context = &generic_container->context;
7186 cfg->generic_context = generic_context;
7188 if (!cfg->generic_sharing_context)
7189 g_assert (!sig->has_type_parameters);
7191 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7192 g_assert (method->is_inflated);
7193 g_assert (mono_method_get_context (method)->method_inst);
7195 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7196 g_assert (sig->generic_param_count);
7198 if (cfg->method == method) {
7199 cfg->real_offset = 0;
7201 cfg->real_offset = inline_offset;
7204 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7205 cfg->cil_offset_to_bb_len = header->code_size;
7207 cfg->current_method = method;
7209 if (cfg->verbose_level > 2)
7210 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7212 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7214 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7215 for (n = 0; n < sig->param_count; ++n)
7216 param_types [n + sig->hasthis] = sig->params [n];
7217 cfg->arg_types = param_types;
7219 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7220 if (cfg->method == method) {
7222 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7223 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7226 NEW_BBLOCK (cfg, start_bblock);
7227 cfg->bb_entry = start_bblock;
7228 start_bblock->cil_code = NULL;
7229 start_bblock->cil_length = 0;
7230 #if defined(__native_client_codegen__)
7231 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7232 ins->dreg = alloc_dreg (cfg, STACK_I4);
7233 MONO_ADD_INS (start_bblock, ins);
7237 NEW_BBLOCK (cfg, end_bblock);
7238 cfg->bb_exit = end_bblock;
7239 end_bblock->cil_code = NULL;
7240 end_bblock->cil_length = 0;
7241 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7242 g_assert (cfg->num_bblocks == 2);
7244 arg_array = cfg->args;
7246 if (header->num_clauses) {
7247 cfg->spvars = g_hash_table_new (NULL, NULL);
7248 cfg->exvars = g_hash_table_new (NULL, NULL);
7250 /* handle exception clauses */
7251 for (i = 0; i < header->num_clauses; ++i) {
7252 MonoBasicBlock *try_bb;
7253 MonoExceptionClause *clause = &header->clauses [i];
7254 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7255 try_bb->real_offset = clause->try_offset;
7256 try_bb->try_start = TRUE;
7257 try_bb->region = ((i + 1) << 8) | clause->flags;
7258 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7259 tblock->real_offset = clause->handler_offset;
7260 tblock->flags |= BB_EXCEPTION_HANDLER;
7263 * Linking the try block with the EH block hinders inlining as we won't be able to
7264 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7266 if (COMPILE_LLVM (cfg))
7267 link_bblock (cfg, try_bb, tblock);
7269 if (*(ip + clause->handler_offset) == CEE_POP)
7270 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7272 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7273 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7274 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7275 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7276 MONO_ADD_INS (tblock, ins);
7278 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7279 /* finally clauses already have a seq point */
7280 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7281 MONO_ADD_INS (tblock, ins);
7284 /* todo: is a fault block unsafe to optimize? */
7285 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7286 tblock->flags |= BB_EXCEPTION_UNSAFE;
7290 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7292 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7294 /* catch and filter blocks get the exception object on the stack */
7295 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7296 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7297 MonoInst *dummy_use;
7299 /* mostly like handle_stack_args (), but just sets the input args */
7300 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7301 tblock->in_scount = 1;
7302 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7303 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7306 * Add a dummy use for the exvar so its liveness info will be
7310 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7312 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7313 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7314 tblock->flags |= BB_EXCEPTION_HANDLER;
7315 tblock->real_offset = clause->data.filter_offset;
7316 tblock->in_scount = 1;
7317 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7318 /* The filter block shares the exvar with the handler block */
7319 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7320 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7321 MONO_ADD_INS (tblock, ins);
7325 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7326 clause->data.catch_class &&
7327 cfg->generic_sharing_context &&
7328 mono_class_check_context_used (clause->data.catch_class)) {
7330 * In shared generic code with catch
7331 * clauses containing type variables
7332 * the exception handling code has to
7333 * be able to get to the rgctx.
7334 * Therefore we have to make sure that
7335 * the vtable/mrgctx argument (for
7336 * static or generic methods) or the
7337 * "this" argument (for non-static
7338 * methods) are live.
7340 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7341 mini_method_get_context (method)->method_inst ||
7342 method->klass->valuetype) {
7343 mono_get_vtable_var (cfg);
7345 MonoInst *dummy_use;
7347 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7352 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7353 cfg->cbb = start_bblock;
7354 cfg->args = arg_array;
7355 mono_save_args (cfg, sig, inline_args);
7358 /* FIRST CODE BLOCK */
7359 NEW_BBLOCK (cfg, bblock);
7360 bblock->cil_code = ip;
7364 ADD_BBLOCK (cfg, bblock);
7366 if (cfg->method == method) {
7367 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7368 if (breakpoint_id) {
7369 MONO_INST_NEW (cfg, ins, OP_BREAK);
7370 MONO_ADD_INS (bblock, ins);
7374 if (mono_security_cas_enabled ())
7375 secman = mono_security_manager_get_methods ();
7377 security = (secman && mono_security_method_has_declsec (method));
7378 /* at this point having security doesn't mean we have any code to generate */
7379 if (security && (cfg->method == method)) {
7380 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7381 * And we do not want to enter the next section (with allocation) if we
7382 * have nothing to generate */
7383 security = mono_declsec_get_demands (method, &actions);
7386 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7387 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7389 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7390 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7391 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7393 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7394 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7398 mono_custom_attrs_free (custom);
7401 custom = mono_custom_attrs_from_class (wrapped->klass);
7402 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7406 mono_custom_attrs_free (custom);
7409 /* not a P/Invoke after all */
7414 /* we use a separate basic block for the initialization code */
7415 NEW_BBLOCK (cfg, init_localsbb);
7416 cfg->bb_init = init_localsbb;
7417 init_localsbb->real_offset = cfg->real_offset;
7418 start_bblock->next_bb = init_localsbb;
7419 init_localsbb->next_bb = bblock;
7420 link_bblock (cfg, start_bblock, init_localsbb);
7421 link_bblock (cfg, init_localsbb, bblock);
7423 cfg->cbb = init_localsbb;
7425 if (cfg->gsharedvt && cfg->method == method) {
7426 MonoGSharedVtMethodInfo *info;
7427 MonoInst *var, *locals_var;
7430 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7431 info->method = cfg->method;
7432 info->count_entries = 16;
7433 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7434 cfg->gsharedvt_info = info;
7436 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7437 /* prevent it from being register allocated */
7438 //var->flags |= MONO_INST_VOLATILE;
7439 cfg->gsharedvt_info_var = var;
7441 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7442 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7444 /* Allocate locals */
7445 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7446 /* prevent it from being register allocated */
7447 //locals_var->flags |= MONO_INST_VOLATILE;
7448 cfg->gsharedvt_locals_var = locals_var;
7450 dreg = alloc_ireg (cfg);
7451 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7453 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7454 ins->dreg = locals_var->dreg;
7456 MONO_ADD_INS (cfg->cbb, ins);
7457 cfg->gsharedvt_locals_var_ins = ins;
7459 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7462 ins->flags |= MONO_INST_INIT;
7466 /* at this point we know, if security is TRUE, that some code needs to be generated */
7467 if (security && (cfg->method == method)) {
7470 cfg->stat_cas_demand_generation++;
7472 if (actions.demand.blob) {
7473 /* Add code for SecurityAction.Demand */
7474 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7475 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7476 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7477 mono_emit_method_call (cfg, secman->demand, args, NULL);
7479 if (actions.noncasdemand.blob) {
7480 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7481 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7482 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7483 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7484 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7485 mono_emit_method_call (cfg, secman->demand, args, NULL);
7487 if (actions.demandchoice.blob) {
7488 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7489 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7490 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7491 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7492 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7496 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7498 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7501 if (mono_security_core_clr_enabled ()) {
7502 /* check if this is native code, e.g. an icall or a p/invoke */
7503 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7504 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7506 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7507 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7509 /* if this ia a native call then it can only be JITted from platform code */
7510 if ((icall || pinvk) && method->klass && method->klass->image) {
7511 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7512 MonoException *ex = icall ? mono_get_exception_security () :
7513 mono_get_exception_method_access ();
7514 emit_throw_exception (cfg, ex);
7521 CHECK_CFG_EXCEPTION;
7523 if (header->code_size == 0)
7526 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7531 if (cfg->method == method)
7532 mono_debug_init_method (cfg, bblock, breakpoint_id);
7534 for (n = 0; n < header->num_locals; ++n) {
7535 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7540 /* We force the vtable variable here for all shared methods
7541 for the possibility that they might show up in a stack
7542 trace where their exact instantiation is needed. */
7543 if (cfg->generic_sharing_context && method == cfg->method) {
7544 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7545 mini_method_get_context (method)->method_inst ||
7546 method->klass->valuetype) {
7547 mono_get_vtable_var (cfg);
7549 /* FIXME: Is there a better way to do this?
7550 We need the variable live for the duration
7551 of the whole method. */
7552 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7556 /* add a check for this != NULL to inlined methods */
7557 if (is_virtual_call) {
7560 NEW_ARGLOAD (cfg, arg_ins, 0);
7561 MONO_ADD_INS (cfg->cbb, arg_ins);
7562 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7565 skip_dead_blocks = !dont_verify;
7566 if (skip_dead_blocks) {
7567 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7572 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7573 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7576 start_new_bblock = 0;
7579 if (cfg->method == method)
7580 cfg->real_offset = ip - header->code;
7582 cfg->real_offset = inline_offset;
7587 if (start_new_bblock) {
7588 bblock->cil_length = ip - bblock->cil_code;
7589 if (start_new_bblock == 2) {
7590 g_assert (ip == tblock->cil_code);
7592 GET_BBLOCK (cfg, tblock, ip);
7594 bblock->next_bb = tblock;
7597 start_new_bblock = 0;
7598 for (i = 0; i < bblock->in_scount; ++i) {
7599 if (cfg->verbose_level > 3)
7600 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7601 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7605 g_slist_free (class_inits);
7608 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7609 link_bblock (cfg, bblock, tblock);
7610 if (sp != stack_start) {
7611 handle_stack_args (cfg, stack_start, sp - stack_start);
7613 CHECK_UNVERIFIABLE (cfg);
7615 bblock->next_bb = tblock;
7618 for (i = 0; i < bblock->in_scount; ++i) {
7619 if (cfg->verbose_level > 3)
7620 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7621 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7624 g_slist_free (class_inits);
7629 if (skip_dead_blocks) {
7630 int ip_offset = ip - header->code;
7632 if (ip_offset == bb->end)
7636 int op_size = mono_opcode_size (ip, end);
7637 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7639 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7641 if (ip_offset + op_size == bb->end) {
7642 MONO_INST_NEW (cfg, ins, OP_NOP);
7643 MONO_ADD_INS (bblock, ins);
7644 start_new_bblock = 1;
7652 * Sequence points are points where the debugger can place a breakpoint.
7653 * Currently, we generate these automatically at points where the IL
7656 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7658 * Make methods interruptible at the beginning, and at the targets of
7659 * backward branches.
7660 * Also, do this at the start of every bblock in methods with clauses too,
7661 * to be able to handle instructions with imprecise control flow like
7663 * Backward branches are handled at the end of method-to-ir ().
7665 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7667 /* Avoid sequence points on empty IL like .volatile */
7668 // FIXME: Enable this
7669 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7670 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7671 if (sp != stack_start)
7672 ins->flags |= MONO_INST_NONEMPTY_STACK;
7673 MONO_ADD_INS (cfg->cbb, ins);
7676 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7679 bblock->real_offset = cfg->real_offset;
7681 if ((cfg->method == method) && cfg->coverage_info) {
7682 guint32 cil_offset = ip - header->code;
7683 cfg->coverage_info->data [cil_offset].cil_code = ip;
7685 /* TODO: Use an increment here */
7686 #if defined(TARGET_X86)
7687 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7688 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7690 MONO_ADD_INS (cfg->cbb, ins);
7692 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7693 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7697 if (cfg->verbose_level > 3)
7698 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7702 if (seq_points && !sym_seq_points && sp != stack_start) {
7704 * The C# compiler uses these nops to notify the JIT that it should
7705 * insert seq points.
7707 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7708 MONO_ADD_INS (cfg->cbb, ins);
7710 if (cfg->keep_cil_nops)
7711 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7713 MONO_INST_NEW (cfg, ins, OP_NOP);
7715 MONO_ADD_INS (bblock, ins);
7718 if (should_insert_brekpoint (cfg->method)) {
7719 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7721 MONO_INST_NEW (cfg, ins, OP_NOP);
7724 MONO_ADD_INS (bblock, ins);
7730 CHECK_STACK_OVF (1);
7731 n = (*ip)-CEE_LDARG_0;
7733 EMIT_NEW_ARGLOAD (cfg, ins, n);
7741 CHECK_STACK_OVF (1);
7742 n = (*ip)-CEE_LDLOC_0;
7744 EMIT_NEW_LOCLOAD (cfg, ins, n);
7753 n = (*ip)-CEE_STLOC_0;
7756 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7758 emit_stloc_ir (cfg, sp, header, n);
7765 CHECK_STACK_OVF (1);
7768 EMIT_NEW_ARGLOAD (cfg, ins, n);
7774 CHECK_STACK_OVF (1);
7777 NEW_ARGLOADA (cfg, ins, n);
7778 MONO_ADD_INS (cfg->cbb, ins);
7788 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7790 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7795 CHECK_STACK_OVF (1);
7798 EMIT_NEW_LOCLOAD (cfg, ins, n);
7802 case CEE_LDLOCA_S: {
7803 unsigned char *tmp_ip;
7805 CHECK_STACK_OVF (1);
7806 CHECK_LOCAL (ip [1]);
7808 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7814 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7823 CHECK_LOCAL (ip [1]);
7824 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7826 emit_stloc_ir (cfg, sp, header, ip [1]);
7831 CHECK_STACK_OVF (1);
7832 EMIT_NEW_PCONST (cfg, ins, NULL);
7833 ins->type = STACK_OBJ;
7838 CHECK_STACK_OVF (1);
7839 EMIT_NEW_ICONST (cfg, ins, -1);
7852 CHECK_STACK_OVF (1);
7853 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7859 CHECK_STACK_OVF (1);
7861 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7867 CHECK_STACK_OVF (1);
7868 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7874 CHECK_STACK_OVF (1);
7875 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7876 ins->type = STACK_I8;
7877 ins->dreg = alloc_dreg (cfg, STACK_I8);
7879 ins->inst_l = (gint64)read64 (ip);
7880 MONO_ADD_INS (bblock, ins);
7886 gboolean use_aotconst = FALSE;
7888 #ifdef TARGET_POWERPC
7889 /* FIXME: Clean this up */
7890 if (cfg->compile_aot)
7891 use_aotconst = TRUE;
7894 /* FIXME: we should really allocate this only late in the compilation process */
7895 f = mono_domain_alloc (cfg->domain, sizeof (float));
7897 CHECK_STACK_OVF (1);
7903 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7905 dreg = alloc_freg (cfg);
7906 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7907 ins->type = STACK_R8;
7909 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7910 ins->type = STACK_R8;
7911 ins->dreg = alloc_dreg (cfg, STACK_R8);
7913 MONO_ADD_INS (bblock, ins);
7923 gboolean use_aotconst = FALSE;
7925 #ifdef TARGET_POWERPC
7926 /* FIXME: Clean this up */
7927 if (cfg->compile_aot)
7928 use_aotconst = TRUE;
7931 /* FIXME: we should really allocate this only late in the compilation process */
7932 d = mono_domain_alloc (cfg->domain, sizeof (double));
7934 CHECK_STACK_OVF (1);
7940 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7942 dreg = alloc_freg (cfg);
7943 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7944 ins->type = STACK_R8;
7946 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7947 ins->type = STACK_R8;
7948 ins->dreg = alloc_dreg (cfg, STACK_R8);
7950 MONO_ADD_INS (bblock, ins);
7959 MonoInst *temp, *store;
7961 CHECK_STACK_OVF (1);
7965 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7966 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7968 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7971 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7984 if (sp [0]->type == STACK_R8)
7985 /* we need to pop the value from the x86 FP stack */
7986 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7992 INLINE_FAILURE ("jmp");
7993 GSHAREDVT_FAILURE (*ip);
7996 if (stack_start != sp)
7998 token = read32 (ip + 1);
7999 /* FIXME: check the signature matches */
8000 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8002 if (!cmethod || mono_loader_get_last_error ())
8005 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8006 GENERIC_SHARING_FAILURE (CEE_JMP);
8008 if (mono_security_cas_enabled ())
8009 CHECK_CFG_EXCEPTION;
8011 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8013 if (ARCH_HAVE_OP_TAIL_CALL) {
8014 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8017 /* Handle tail calls similarly to calls */
8018 n = fsig->param_count + fsig->hasthis;
8022 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8023 call->method = cmethod;
8024 call->tail_call = TRUE;
8025 call->signature = mono_method_signature (cmethod);
8026 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8027 call->inst.inst_p0 = cmethod;
8028 for (i = 0; i < n; ++i)
8029 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8031 mono_arch_emit_call (cfg, call);
8032 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8033 MONO_ADD_INS (bblock, (MonoInst*)call);
8035 for (i = 0; i < num_args; ++i)
8036 /* Prevent arguments from being optimized away */
8037 arg_array [i]->flags |= MONO_INST_VOLATILE;
8039 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8040 ins = (MonoInst*)call;
8041 ins->inst_p0 = cmethod;
8042 MONO_ADD_INS (bblock, ins);
8046 start_new_bblock = 1;
8051 case CEE_CALLVIRT: {
8052 MonoInst *addr = NULL;
8053 MonoMethodSignature *fsig = NULL;
8055 int virtual = *ip == CEE_CALLVIRT;
8056 int calli = *ip == CEE_CALLI;
8057 gboolean pass_imt_from_rgctx = FALSE;
8058 MonoInst *imt_arg = NULL;
8059 MonoInst *keep_this_alive = NULL;
8060 gboolean pass_vtable = FALSE;
8061 gboolean pass_mrgctx = FALSE;
8062 MonoInst *vtable_arg = NULL;
8063 gboolean check_this = FALSE;
8064 gboolean supported_tail_call = FALSE;
8065 gboolean tail_call = FALSE;
8066 gboolean need_seq_point = FALSE;
8067 guint32 call_opcode = *ip;
8068 gboolean emit_widen = TRUE;
8069 gboolean push_res = TRUE;
8070 gboolean skip_ret = FALSE;
8071 gboolean delegate_invoke = FALSE;
8074 token = read32 (ip + 1);
8079 //GSHAREDVT_FAILURE (*ip);
8084 fsig = mini_get_signature (method, token, generic_context);
8085 n = fsig->param_count + fsig->hasthis;
8087 if (method->dynamic && fsig->pinvoke) {
8091 * This is a call through a function pointer using a pinvoke
8092 * signature. Have to create a wrapper and call that instead.
8093 * FIXME: This is very slow, need to create a wrapper at JIT time
8094 * instead based on the signature.
8096 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8097 EMIT_NEW_PCONST (cfg, args [1], fsig);
8099 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8102 MonoMethod *cil_method;
8104 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8105 cil_method = cmethod;
8107 if (constrained_call) {
8108 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8109 if (cfg->verbose_level > 2)
8110 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8111 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8112 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8113 cfg->generic_sharing_context)) {
8114 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
8117 if (cfg->verbose_level > 2)
8118 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8120 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8122 * This is needed since get_method_constrained can't find
8123 * the method in klass representing a type var.
8124 * The type var is guaranteed to be a reference type in this
8127 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8128 g_assert (!cmethod->klass->valuetype);
8130 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
8135 if (!cmethod || mono_loader_get_last_error ())
8137 if (!dont_verify && !cfg->skip_visibility) {
8138 MonoMethod *target_method = cil_method;
8139 if (method->is_inflated) {
8140 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8142 if (!mono_method_can_access_method (method_definition, target_method) &&
8143 !mono_method_can_access_method (method, cil_method))
8144 METHOD_ACCESS_FAILURE (method, cil_method);
8147 if (mono_security_core_clr_enabled ())
8148 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8150 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8151 /* MS.NET seems to silently convert this to a callvirt */
8156 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8157 * converts to a callvirt.
8159 * tests/bug-515884.il is an example of this behavior
8161 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8162 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8163 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8167 if (!cmethod->klass->inited)
8168 if (!mono_class_init (cmethod->klass))
8169 TYPE_LOAD_ERROR (cmethod->klass);
8171 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8172 mini_class_is_system_array (cmethod->klass)) {
8173 array_rank = cmethod->klass->rank;
8174 fsig = mono_method_signature (cmethod);
8176 fsig = mono_method_signature (cmethod);
8181 if (fsig->pinvoke) {
8182 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8183 check_for_pending_exc, cfg->compile_aot);
8184 fsig = mono_method_signature (wrapper);
8185 } else if (constrained_call) {
8186 fsig = mono_method_signature (cmethod);
8188 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8193 mono_save_token_info (cfg, image, token, cil_method);
8195 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8197 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
8198 * foo (bar (), baz ())
8199 * works correctly. MS does this also:
8200 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
8201 * The problem with this approach is that the debugger will stop after all calls returning a value,
8202 * even for simple cases, like:
8205 /* Special case a few common successor opcodes */
8206 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8207 need_seq_point = TRUE;
8210 n = fsig->param_count + fsig->hasthis;
8212 /* Don't support calls made using type arguments for now */
8214 if (cfg->gsharedvt) {
8215 if (mini_is_gsharedvt_signature (cfg, fsig))
8216 GSHAREDVT_FAILURE (*ip);
8220 if (mono_security_cas_enabled ()) {
8221 if (check_linkdemand (cfg, method, cmethod))
8222 INLINE_FAILURE ("linkdemand");
8223 CHECK_CFG_EXCEPTION;
8226 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8227 g_assert_not_reached ();
8230 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8233 if (!cfg->generic_sharing_context && cmethod)
8234 g_assert (!mono_method_check_context_used (cmethod));
8238 //g_assert (!virtual || fsig->hasthis);
8242 if (constrained_call) {
8243 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8245 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8247 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8248 /* The 'Own method' case below */
8249 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8250 /* 'The type parameter is instantiated as a reference type' case below. */
8251 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8252 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8253 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8254 MonoInst *args [16];
8257 * This case handles calls to
8258 * - object:ToString()/Equals()/GetHashCode(),
8259 * - System.IComparable<T>:CompareTo()
8260 * - System.IEquatable<T>:Equals ()
8261 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8265 if (mono_method_check_context_used (cmethod))
8266 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8268 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8269 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8271 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8272 if (fsig->hasthis && fsig->param_count) {
8273 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8274 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8275 ins->dreg = alloc_preg (cfg);
8276 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8277 MONO_ADD_INS (cfg->cbb, ins);
8280 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8283 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8285 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8286 addr_reg = ins->dreg;
8287 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8289 EMIT_NEW_ICONST (cfg, args [3], 0);
8290 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8293 EMIT_NEW_ICONST (cfg, args [3], 0);
8294 EMIT_NEW_ICONST (cfg, args [4], 0);
8296 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8299 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8300 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8301 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8305 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8306 MONO_ADD_INS (cfg->cbb, add);
8308 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8309 MONO_ADD_INS (cfg->cbb, ins);
8310 /* ins represents the call result */
8315 GSHAREDVT_FAILURE (*ip);
8319 * We have the `constrained.' prefix opcode.
8321 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8323 * The type parameter is instantiated as a valuetype,
8324 * but that type doesn't override the method we're
8325 * calling, so we need to box `this'.
8327 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8328 ins->klass = constrained_call;
8329 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8330 CHECK_CFG_EXCEPTION;
8331 } else if (!constrained_call->valuetype) {
8332 int dreg = alloc_ireg_ref (cfg);
8335 * The type parameter is instantiated as a reference
8336 * type. We have a managed pointer on the stack, so
8337 * we need to dereference it here.
8339 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8340 ins->type = STACK_OBJ;
8343 if (cmethod->klass->valuetype) {
8346 /* Interface method */
8349 mono_class_setup_vtable (constrained_call);
8350 CHECK_TYPELOAD (constrained_call);
8351 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8353 TYPE_LOAD_ERROR (constrained_call);
8354 slot = mono_method_get_vtable_slot (cmethod);
8356 TYPE_LOAD_ERROR (cmethod->klass);
8357 cmethod = constrained_call->vtable [ioffset + slot];
8359 if (cmethod->klass == mono_defaults.enum_class) {
8360 /* Enum implements some interfaces, so treat this as the first case */
8361 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8362 ins->klass = constrained_call;
8363 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8364 CHECK_CFG_EXCEPTION;
8369 constrained_call = NULL;
8372 if (!calli && check_call_signature (cfg, fsig, sp))
8375 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8376 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8377 delegate_invoke = TRUE;
8380 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8382 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8383 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8391 * If the callee is a shared method, then its static cctor
8392 * might not get called after the call was patched.
8394 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8395 emit_generic_class_init (cfg, cmethod->klass);
8396 CHECK_TYPELOAD (cmethod->klass);
8400 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8402 if (cfg->generic_sharing_context && cmethod) {
8403 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8405 context_used = mini_method_check_context_used (cfg, cmethod);
8407 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8408 /* Generic method interface
8409 calls are resolved via a
8410 helper function and don't
8412 if (!cmethod_context || !cmethod_context->method_inst)
8413 pass_imt_from_rgctx = TRUE;
8417 * If a shared method calls another
8418 * shared method then the caller must
8419 * have a generic sharing context
8420 * because the magic trampoline
8421 * requires it. FIXME: We shouldn't
8422 * have to force the vtable/mrgctx
8423 * variable here. Instead there
8424 * should be a flag in the cfg to
8425 * request a generic sharing context.
8428 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8429 mono_get_vtable_var (cfg);
8434 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8436 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8438 CHECK_TYPELOAD (cmethod->klass);
8439 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8444 g_assert (!vtable_arg);
8446 if (!cfg->compile_aot) {
8448 * emit_get_rgctx_method () calls mono_class_vtable () so check
8449 * for type load errors before.
8451 mono_class_setup_vtable (cmethod->klass);
8452 CHECK_TYPELOAD (cmethod->klass);
8455 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8457 /* !marshalbyref is needed to properly handle generic methods + remoting */
8458 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8459 MONO_METHOD_IS_FINAL (cmethod)) &&
8460 !mono_class_is_marshalbyref (cmethod->klass)) {
8467 if (pass_imt_from_rgctx) {
8468 g_assert (!pass_vtable);
8471 imt_arg = emit_get_rgctx_method (cfg, context_used,
8472 cmethod, MONO_RGCTX_INFO_METHOD);
8476 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8478 /* Calling virtual generic methods */
8479 if (cmethod && virtual &&
8480 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8481 !(MONO_METHOD_IS_FINAL (cmethod) &&
8482 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8483 fsig->generic_param_count &&
8484 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8485 MonoInst *this_temp, *this_arg_temp, *store;
8486 MonoInst *iargs [4];
8487 gboolean use_imt = FALSE;
8489 g_assert (fsig->is_inflated);
8491 /* Prevent inlining of methods that contain indirect calls */
8492 INLINE_FAILURE ("virtual generic call");
8494 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8495 GSHAREDVT_FAILURE (*ip);
8497 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8498 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8503 g_assert (!imt_arg);
8505 g_assert (cmethod->is_inflated);
8506 imt_arg = emit_get_rgctx_method (cfg, context_used,
8507 cmethod, MONO_RGCTX_INFO_METHOD);
8508 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8510 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8511 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8512 MONO_ADD_INS (bblock, store);
8514 /* FIXME: This should be a managed pointer */
8515 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8517 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8518 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8519 cmethod, MONO_RGCTX_INFO_METHOD);
8520 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8521 addr = mono_emit_jit_icall (cfg,
8522 mono_helper_compile_generic_method, iargs);
8524 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8526 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8533 * Implement a workaround for the inherent races involved in locking:
8539 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8540 * try block, the Exit () won't be executed, see:
8541 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8542 * To work around this, we extend such try blocks to include the last x bytes
8543 * of the Monitor.Enter () call.
8545 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8546 MonoBasicBlock *tbb;
8548 GET_BBLOCK (cfg, tbb, ip + 5);
8550 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8551 * from Monitor.Enter like ArgumentNullException.
8553 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8554 /* Mark this bblock as needing to be extended */
8555 tbb->extend_try_block = TRUE;
8559 /* Conversion to a JIT intrinsic */
8560 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8562 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8563 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8570 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8571 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8572 mono_method_check_inlining (cfg, cmethod)) {
8574 gboolean always = FALSE;
8576 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8577 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8578 /* Prevent inlining of methods that call wrappers */
8579 INLINE_FAILURE ("wrapper call");
8580 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8584 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8586 cfg->real_offset += 5;
8588 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8589 /* *sp is already set by inline_method */
8594 inline_costs += costs;
8600 /* Tail recursion elimination */
8601 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8602 gboolean has_vtargs = FALSE;
8605 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8606 INLINE_FAILURE ("tail call");
8608 /* keep it simple */
8609 for (i = fsig->param_count - 1; i >= 0; i--) {
8610 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8615 for (i = 0; i < n; ++i)
8616 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8617 MONO_INST_NEW (cfg, ins, OP_BR);
8618 MONO_ADD_INS (bblock, ins);
8619 tblock = start_bblock->out_bb [0];
8620 link_bblock (cfg, bblock, tblock);
8621 ins->inst_target_bb = tblock;
8622 start_new_bblock = 1;
8624 /* skip the CEE_RET, too */
8625 if (ip_in_bb (cfg, bblock, ip + 5))
8632 inline_costs += 10 * num_calls++;
8635 * Making generic calls out of gsharedvt methods.
8636 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8637 * patching gshared method addresses into a gsharedvt method.
8639 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8640 MonoRgctxInfoType info_type;
8643 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8644 //GSHAREDVT_FAILURE (*ip);
8645 // disable for possible remoting calls
8646 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8647 GSHAREDVT_FAILURE (*ip);
8648 if (fsig->generic_param_count) {
8649 /* virtual generic call */
8650 g_assert (mono_use_imt);
8651 g_assert (!imt_arg);
8652 /* Same as the virtual generic case above */
8653 imt_arg = emit_get_rgctx_method (cfg, context_used,
8654 cmethod, MONO_RGCTX_INFO_METHOD);
8655 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8657 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8658 /* This can happen when we call a fully instantiated iface method */
8659 imt_arg = emit_get_rgctx_method (cfg, context_used,
8660 cmethod, MONO_RGCTX_INFO_METHOD);
8665 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8666 /* test_0_multi_dim_arrays () in gshared.cs */
8667 GSHAREDVT_FAILURE (*ip);
8669 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8670 keep_this_alive = sp [0];
8672 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8673 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8675 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8676 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8678 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8680 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8682 * We pass the address to the gsharedvt trampoline in the rgctx reg
8684 MonoInst *callee = addr;
8686 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8688 GSHAREDVT_FAILURE (*ip);
8690 addr = emit_get_rgctx_sig (cfg, context_used,
8691 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8692 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8696 /* Generic sharing */
8699 * Use this if the callee is gsharedvt sharable too, since
8700 * at runtime we might find an instantiation so the call cannot
8701 * be patched (the 'no_patch' code path in mini-trampolines.c).
8703 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8704 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8705 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8706 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8707 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8708 INLINE_FAILURE ("gshared");
8710 g_assert (cfg->generic_sharing_context && cmethod);
8714 * We are compiling a call to a
8715 * generic method from shared code,
8716 * which means that we have to look up
8717 * the method in the rgctx and do an
8721 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8723 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8724 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8728 /* Indirect calls */
8730 if (call_opcode == CEE_CALL)
8731 g_assert (context_used);
8732 else if (call_opcode == CEE_CALLI)
8733 g_assert (!vtable_arg);
8735 /* FIXME: what the hell is this??? */
8736 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8737 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8739 /* Prevent inlining of methods with indirect calls */
8740 INLINE_FAILURE ("indirect call");
8742 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8747 * Instead of emitting an indirect call, emit a direct call
8748 * with the contents of the aotconst as the patch info.
8750 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8751 info_type = addr->inst_c1;
8752 info_data = addr->inst_p0;
8754 info_type = addr->inst_right->inst_c1;
8755 info_data = addr->inst_right->inst_left;
8758 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8759 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8764 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8772 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8773 MonoInst *val = sp [fsig->param_count];
8775 if (val->type == STACK_OBJ) {
8776 MonoInst *iargs [2];
8781 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8784 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8785 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8786 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8787 emit_write_barrier (cfg, addr, val);
8788 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8789 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8791 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8792 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8793 if (!cmethod->klass->element_class->valuetype && !readonly)
8794 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8795 CHECK_TYPELOAD (cmethod->klass);
8798 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8801 g_assert_not_reached ();
8808 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8812 /* Tail prefix / tail call optimization */
8814 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8815 /* FIXME: runtime generic context pointer for jumps? */
8816 /* FIXME: handle this for generic sharing eventually */
8817 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8818 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8819 supported_tail_call = TRUE;
8821 if (supported_tail_call) {
8824 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8825 INLINE_FAILURE ("tail call");
8827 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8829 if (ARCH_HAVE_OP_TAIL_CALL) {
8830 /* Handle tail calls similarly to normal calls */
8833 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8835 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8836 call->tail_call = TRUE;
8837 call->method = cmethod;
8838 call->signature = mono_method_signature (cmethod);
8841 * We implement tail calls by storing the actual arguments into the
8842 * argument variables, then emitting a CEE_JMP.
8844 for (i = 0; i < n; ++i) {
8845 /* Prevent argument from being register allocated */
8846 arg_array [i]->flags |= MONO_INST_VOLATILE;
8847 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8849 ins = (MonoInst*)call;
8850 ins->inst_p0 = cmethod;
8851 ins->inst_p1 = arg_array [0];
8852 MONO_ADD_INS (bblock, ins);
8853 link_bblock (cfg, bblock, end_bblock);
8854 start_new_bblock = 1;
8856 // FIXME: Eliminate unreachable epilogs
8859 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8860 * only reachable from this call.
8862 GET_BBLOCK (cfg, tblock, ip + 5);
8863 if (tblock == bblock || tblock->in_count == 0)
8872 * Synchronized wrappers.
8873 * It's hard to determine where to replace a method with its synchronized
8874 * wrapper without causing an infinite recursion. The current solution is
8875 * to add the synchronized wrapper in the trampolines, and to
8876 * change the called method to a dummy wrapper, and resolve that wrapper
8877 * to the real method in mono_jit_compile_method ().
8879 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8880 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8881 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8882 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8886 INLINE_FAILURE ("call");
8887 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8888 imt_arg, vtable_arg);
8891 link_bblock (cfg, bblock, end_bblock);
8892 start_new_bblock = 1;
8894 // FIXME: Eliminate unreachable epilogs
8897 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8898 * only reachable from this call.
8900 GET_BBLOCK (cfg, tblock, ip + 5);
8901 if (tblock == bblock || tblock->in_count == 0)
8908 /* End of call, INS should contain the result of the call, if any */
8910 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8913 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8918 if (keep_this_alive) {
8919 MonoInst *dummy_use;
8921 /* See mono_emit_method_call_full () */
8922 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8925 CHECK_CFG_EXCEPTION;
8929 g_assert (*ip == CEE_RET);
8933 constrained_call = NULL;
8935 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8939 if (cfg->method != method) {
8940 /* return from inlined method */
8942 * If in_count == 0, that means the ret is unreachable due to
8943 * being preceded by a throw. In that case, inline_method () will
8944 * handle setting the return value
8945 * (test case: test_0_inline_throw ()).
8947 if (return_var && cfg->cbb->in_count) {
8948 MonoType *ret_type = mono_method_signature (method)->ret;
8954 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8957 //g_assert (returnvar != -1);
8958 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8959 cfg->ret_var_set = TRUE;
8962 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8964 if (cfg->lmf_var && cfg->cbb->in_count)
8968 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8970 if (seq_points && !sym_seq_points) {
8972 * Place a seq point here too even though the IL stack is not
8973 * empty, so a step over on
8976 * will work correctly.
8978 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8979 MONO_ADD_INS (cfg->cbb, ins);
8982 g_assert (!return_var);
8986 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8989 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8992 if (!cfg->vret_addr) {
8995 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8997 EMIT_NEW_RETLOADA (cfg, ret_addr);
8999 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9000 ins->klass = mono_class_from_mono_type (ret_type);
9003 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9004 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9005 MonoInst *iargs [1];
9009 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9010 mono_arch_emit_setret (cfg, method, conv);
9012 mono_arch_emit_setret (cfg, method, *sp);
9015 mono_arch_emit_setret (cfg, method, *sp);
9020 if (sp != stack_start)
9022 MONO_INST_NEW (cfg, ins, OP_BR);
9024 ins->inst_target_bb = end_bblock;
9025 MONO_ADD_INS (bblock, ins);
9026 link_bblock (cfg, bblock, end_bblock);
9027 start_new_bblock = 1;
9031 MONO_INST_NEW (cfg, ins, OP_BR);
9033 target = ip + 1 + (signed char)(*ip);
9035 GET_BBLOCK (cfg, tblock, target);
9036 link_bblock (cfg, bblock, tblock);
9037 ins->inst_target_bb = tblock;
9038 if (sp != stack_start) {
9039 handle_stack_args (cfg, stack_start, sp - stack_start);
9041 CHECK_UNVERIFIABLE (cfg);
9043 MONO_ADD_INS (bblock, ins);
9044 start_new_bblock = 1;
9045 inline_costs += BRANCH_COST;
9059 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9061 target = ip + 1 + *(signed char*)ip;
9067 inline_costs += BRANCH_COST;
9071 MONO_INST_NEW (cfg, ins, OP_BR);
9074 target = ip + 4 + (gint32)read32(ip);
9076 GET_BBLOCK (cfg, tblock, target);
9077 link_bblock (cfg, bblock, tblock);
9078 ins->inst_target_bb = tblock;
9079 if (sp != stack_start) {
9080 handle_stack_args (cfg, stack_start, sp - stack_start);
9082 CHECK_UNVERIFIABLE (cfg);
9085 MONO_ADD_INS (bblock, ins);
9087 start_new_bblock = 1;
9088 inline_costs += BRANCH_COST;
9095 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9096 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9097 guint32 opsize = is_short ? 1 : 4;
9099 CHECK_OPSIZE (opsize);
9101 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9104 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9109 GET_BBLOCK (cfg, tblock, target);
9110 link_bblock (cfg, bblock, tblock);
9111 GET_BBLOCK (cfg, tblock, ip);
9112 link_bblock (cfg, bblock, tblock);
9114 if (sp != stack_start) {
9115 handle_stack_args (cfg, stack_start, sp - stack_start);
9116 CHECK_UNVERIFIABLE (cfg);
9119 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9120 cmp->sreg1 = sp [0]->dreg;
9121 type_from_op (cmp, sp [0], NULL);
9124 #if SIZEOF_REGISTER == 4
9125 if (cmp->opcode == OP_LCOMPARE_IMM) {
9126 /* Convert it to OP_LCOMPARE */
9127 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9128 ins->type = STACK_I8;
9129 ins->dreg = alloc_dreg (cfg, STACK_I8);
9131 MONO_ADD_INS (bblock, ins);
9132 cmp->opcode = OP_LCOMPARE;
9133 cmp->sreg2 = ins->dreg;
9136 MONO_ADD_INS (bblock, cmp);
9138 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9139 type_from_op (ins, sp [0], NULL);
9140 MONO_ADD_INS (bblock, ins);
9141 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9142 GET_BBLOCK (cfg, tblock, target);
9143 ins->inst_true_bb = tblock;
9144 GET_BBLOCK (cfg, tblock, ip);
9145 ins->inst_false_bb = tblock;
9146 start_new_bblock = 2;
9149 inline_costs += BRANCH_COST;
9164 MONO_INST_NEW (cfg, ins, *ip);
9166 target = ip + 4 + (gint32)read32(ip);
9172 inline_costs += BRANCH_COST;
9176 MonoBasicBlock **targets;
9177 MonoBasicBlock *default_bblock;
9178 MonoJumpInfoBBTable *table;
9179 int offset_reg = alloc_preg (cfg);
9180 int target_reg = alloc_preg (cfg);
9181 int table_reg = alloc_preg (cfg);
9182 int sum_reg = alloc_preg (cfg);
9183 gboolean use_op_switch;
9187 n = read32 (ip + 1);
9190 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9194 CHECK_OPSIZE (n * sizeof (guint32));
9195 target = ip + n * sizeof (guint32);
9197 GET_BBLOCK (cfg, default_bblock, target);
9198 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9200 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9201 for (i = 0; i < n; ++i) {
9202 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9203 targets [i] = tblock;
9204 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9208 if (sp != stack_start) {
9210 * Link the current bb with the targets as well, so handle_stack_args
9211 * will set their in_stack correctly.
9213 link_bblock (cfg, bblock, default_bblock);
9214 for (i = 0; i < n; ++i)
9215 link_bblock (cfg, bblock, targets [i]);
9217 handle_stack_args (cfg, stack_start, sp - stack_start);
9219 CHECK_UNVERIFIABLE (cfg);
9222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9226 for (i = 0; i < n; ++i)
9227 link_bblock (cfg, bblock, targets [i]);
9229 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9230 table->table = targets;
9231 table->table_size = n;
9233 use_op_switch = FALSE;
9235 /* ARM implements SWITCH statements differently */
9236 /* FIXME: Make it use the generic implementation */
9237 if (!cfg->compile_aot)
9238 use_op_switch = TRUE;
9241 if (COMPILE_LLVM (cfg))
9242 use_op_switch = TRUE;
9244 cfg->cbb->has_jump_table = 1;
9246 if (use_op_switch) {
9247 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9248 ins->sreg1 = src1->dreg;
9249 ins->inst_p0 = table;
9250 ins->inst_many_bb = targets;
9251 ins->klass = GUINT_TO_POINTER (n);
9252 MONO_ADD_INS (cfg->cbb, ins);
9254 if (sizeof (gpointer) == 8)
9255 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9259 #if SIZEOF_REGISTER == 8
9260 /* The upper word might not be zero, and we add it to a 64 bit address later */
9261 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9264 if (cfg->compile_aot) {
9265 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9267 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9268 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9269 ins->inst_p0 = table;
9270 ins->dreg = table_reg;
9271 MONO_ADD_INS (cfg->cbb, ins);
9274 /* FIXME: Use load_memindex */
9275 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9277 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9279 start_new_bblock = 1;
9280 inline_costs += (BRANCH_COST * 2);
9300 dreg = alloc_freg (cfg);
9303 dreg = alloc_lreg (cfg);
9306 dreg = alloc_ireg_ref (cfg);
9309 dreg = alloc_preg (cfg);
9312 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9313 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9314 ins->flags |= ins_flag;
9315 MONO_ADD_INS (bblock, ins);
9317 if (ins_flag & MONO_INST_VOLATILE) {
9318 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9319 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9320 emit_memory_barrier (cfg, FullBarrier);
9336 if (ins_flag & MONO_INST_VOLATILE) {
9337 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9338 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
9339 emit_memory_barrier (cfg, FullBarrier);
9342 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9343 ins->flags |= ins_flag;
9346 MONO_ADD_INS (bblock, ins);
9348 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9349 emit_write_barrier (cfg, sp [0], sp [1]);
9358 MONO_INST_NEW (cfg, ins, (*ip));
9360 ins->sreg1 = sp [0]->dreg;
9361 ins->sreg2 = sp [1]->dreg;
9362 type_from_op (ins, sp [0], sp [1]);
9364 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9366 /* Use the immediate opcodes if possible */
9367 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9368 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9369 if (imm_opcode != -1) {
9370 ins->opcode = imm_opcode;
9371 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9374 NULLIFY_INS (sp [1]);
9378 MONO_ADD_INS ((cfg)->cbb, (ins));
9380 *sp++ = mono_decompose_opcode (cfg, ins);
9397 MONO_INST_NEW (cfg, ins, (*ip));
9399 ins->sreg1 = sp [0]->dreg;
9400 ins->sreg2 = sp [1]->dreg;
9401 type_from_op (ins, sp [0], sp [1]);
9403 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9404 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9406 /* FIXME: Pass opcode to is_inst_imm */
9408 /* Use the immediate opcodes if possible */
9409 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9412 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9413 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9414 /* Keep emulated opcodes which are optimized away later */
9415 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9416 imm_opcode = mono_op_to_op_imm (ins->opcode);
9419 if (imm_opcode != -1) {
9420 ins->opcode = imm_opcode;
9421 if (sp [1]->opcode == OP_I8CONST) {
9422 #if SIZEOF_REGISTER == 8
9423 ins->inst_imm = sp [1]->inst_l;
9425 ins->inst_ls_word = sp [1]->inst_ls_word;
9426 ins->inst_ms_word = sp [1]->inst_ms_word;
9430 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9433 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9434 if (sp [1]->next == NULL)
9435 NULLIFY_INS (sp [1]);
9438 MONO_ADD_INS ((cfg)->cbb, (ins));
9440 *sp++ = mono_decompose_opcode (cfg, ins);
9453 case CEE_CONV_OVF_I8:
9454 case CEE_CONV_OVF_U8:
9458 /* Special case this earlier so we have long constants in the IR */
9459 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9460 int data = sp [-1]->inst_c0;
9461 sp [-1]->opcode = OP_I8CONST;
9462 sp [-1]->type = STACK_I8;
9463 #if SIZEOF_REGISTER == 8
9464 if ((*ip) == CEE_CONV_U8)
9465 sp [-1]->inst_c0 = (guint32)data;
9467 sp [-1]->inst_c0 = data;
9469 sp [-1]->inst_ls_word = data;
9470 if ((*ip) == CEE_CONV_U8)
9471 sp [-1]->inst_ms_word = 0;
9473 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9475 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9482 case CEE_CONV_OVF_I4:
9483 case CEE_CONV_OVF_I1:
9484 case CEE_CONV_OVF_I2:
9485 case CEE_CONV_OVF_I:
9486 case CEE_CONV_OVF_U:
9489 if (sp [-1]->type == STACK_R8) {
9490 ADD_UNOP (CEE_CONV_OVF_I8);
9497 case CEE_CONV_OVF_U1:
9498 case CEE_CONV_OVF_U2:
9499 case CEE_CONV_OVF_U4:
9502 if (sp [-1]->type == STACK_R8) {
9503 ADD_UNOP (CEE_CONV_OVF_U8);
9510 case CEE_CONV_OVF_I1_UN:
9511 case CEE_CONV_OVF_I2_UN:
9512 case CEE_CONV_OVF_I4_UN:
9513 case CEE_CONV_OVF_I8_UN:
9514 case CEE_CONV_OVF_U1_UN:
9515 case CEE_CONV_OVF_U2_UN:
9516 case CEE_CONV_OVF_U4_UN:
9517 case CEE_CONV_OVF_U8_UN:
9518 case CEE_CONV_OVF_I_UN:
9519 case CEE_CONV_OVF_U_UN:
9526 CHECK_CFG_EXCEPTION;
9530 case CEE_ADD_OVF_UN:
9532 case CEE_MUL_OVF_UN:
9534 case CEE_SUB_OVF_UN:
9540 GSHAREDVT_FAILURE (*ip);
9543 token = read32 (ip + 1);
9544 klass = mini_get_class (method, token, generic_context);
9545 CHECK_TYPELOAD (klass);
9547 if (generic_class_is_reference_type (cfg, klass)) {
9548 MonoInst *store, *load;
9549 int dreg = alloc_ireg_ref (cfg);
9551 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9552 load->flags |= ins_flag;
9553 MONO_ADD_INS (cfg->cbb, load);
9555 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9556 store->flags |= ins_flag;
9557 MONO_ADD_INS (cfg->cbb, store);
9559 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9560 emit_write_barrier (cfg, sp [0], sp [1]);
9562 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9574 token = read32 (ip + 1);
9575 klass = mini_get_class (method, token, generic_context);
9576 CHECK_TYPELOAD (klass);
9578 /* Optimize the common ldobj+stloc combination */
9588 loc_index = ip [5] - CEE_STLOC_0;
9595 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9596 CHECK_LOCAL (loc_index);
9598 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9599 ins->dreg = cfg->locals [loc_index]->dreg;
9600 ins->flags |= ins_flag;
9603 if (ins_flag & MONO_INST_VOLATILE) {
9604 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9605 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9606 emit_memory_barrier (cfg, FullBarrier);
9612 /* Optimize the ldobj+stobj combination */
9613 /* The reference case ends up being a load+store anyway */
9614 /* Skip this if the operation is volatile. */
9615 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9620 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9627 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9628 ins->flags |= ins_flag;
9631 if (ins_flag & MONO_INST_VOLATILE) {
9632 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9633 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9634 emit_memory_barrier (cfg, FullBarrier);
9643 CHECK_STACK_OVF (1);
9645 n = read32 (ip + 1);
9647 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9648 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9649 ins->type = STACK_OBJ;
9652 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9653 MonoInst *iargs [1];
9655 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9656 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9658 if (cfg->opt & MONO_OPT_SHARED) {
9659 MonoInst *iargs [3];
9661 if (cfg->compile_aot) {
9662 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9664 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9665 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9666 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9667 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9668 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9670 if (bblock->out_of_line) {
9671 MonoInst *iargs [2];
9673 if (image == mono_defaults.corlib) {
9675 * Avoid relocations in AOT and save some space by using a
9676 * version of helper_ldstr specialized to mscorlib.
9678 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9679 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9681 /* Avoid creating the string object */
9682 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9683 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9684 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9688 if (cfg->compile_aot) {
9689 NEW_LDSTRCONST (cfg, ins, image, n);
9691 MONO_ADD_INS (bblock, ins);
9694 NEW_PCONST (cfg, ins, NULL);
9695 ins->type = STACK_OBJ;
9696 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9698 OUT_OF_MEMORY_FAILURE;
9701 MONO_ADD_INS (bblock, ins);
9710 MonoInst *iargs [2];
9711 MonoMethodSignature *fsig;
9714 MonoInst *vtable_arg = NULL;
9717 token = read32 (ip + 1);
9718 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9719 if (!cmethod || mono_loader_get_last_error ())
9721 fsig = mono_method_get_signature_checked (cmethod, image, token, NULL, &cfg->error);
9724 mono_save_token_info (cfg, image, token, cmethod);
9726 if (!mono_class_init (cmethod->klass))
9727 TYPE_LOAD_ERROR (cmethod->klass);
9729 context_used = mini_method_check_context_used (cfg, cmethod);
9731 if (mono_security_cas_enabled ()) {
9732 if (check_linkdemand (cfg, method, cmethod))
9733 INLINE_FAILURE ("linkdemand");
9734 CHECK_CFG_EXCEPTION;
9735 } else if (mono_security_core_clr_enabled ()) {
9736 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9739 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9740 emit_generic_class_init (cfg, cmethod->klass);
9741 CHECK_TYPELOAD (cmethod->klass);
9745 if (cfg->gsharedvt) {
9746 if (mini_is_gsharedvt_variable_signature (sig))
9747 GSHAREDVT_FAILURE (*ip);
9751 n = fsig->param_count;
9755 * Generate smaller code for the common newobj <exception> instruction in
9756 * argument checking code.
9758 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9759 is_exception_class (cmethod->klass) && n <= 2 &&
9760 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9761 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9762 MonoInst *iargs [3];
9766 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9769 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9773 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9778 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9781 g_assert_not_reached ();
9789 /* move the args to allow room for 'this' in the first position */
9795 /* check_call_signature () requires sp[0] to be set */
9796 this_ins.type = STACK_OBJ;
9798 if (check_call_signature (cfg, fsig, sp))
9803 if (mini_class_is_system_array (cmethod->klass)) {
9804 *sp = emit_get_rgctx_method (cfg, context_used,
9805 cmethod, MONO_RGCTX_INFO_METHOD);
9807 /* Avoid varargs in the common case */
9808 if (fsig->param_count == 1)
9809 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9810 else if (fsig->param_count == 2)
9811 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9812 else if (fsig->param_count == 3)
9813 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9814 else if (fsig->param_count == 4)
9815 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9817 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9818 } else if (cmethod->string_ctor) {
9819 g_assert (!context_used);
9820 g_assert (!vtable_arg);
9821 /* we simply pass a null pointer */
9822 EMIT_NEW_PCONST (cfg, *sp, NULL);
9823 /* now call the string ctor */
9824 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9826 if (cmethod->klass->valuetype) {
9827 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9828 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9829 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9834 * The code generated by mini_emit_virtual_call () expects
9835 * iargs [0] to be a boxed instance, but luckily the vcall
9836 * will be transformed into a normal call there.
9838 } else if (context_used) {
9839 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9842 MonoVTable *vtable = NULL;
9844 if (!cfg->compile_aot)
9845 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9846 CHECK_TYPELOAD (cmethod->klass);
9849 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9850 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9851 * As a workaround, we call class cctors before allocating objects.
9853 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9854 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9855 if (cfg->verbose_level > 2)
9856 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9857 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9860 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9863 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9866 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9868 /* Now call the actual ctor */
9869 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9870 CHECK_CFG_EXCEPTION;
9873 if (alloc == NULL) {
9875 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9876 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9890 token = read32 (ip + 1);
9891 klass = mini_get_class (method, token, generic_context);
9892 CHECK_TYPELOAD (klass);
9893 if (sp [0]->type != STACK_OBJ)
9896 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9897 CHECK_CFG_EXCEPTION;
9906 token = read32 (ip + 1);
9907 klass = mini_get_class (method, token, generic_context);
9908 CHECK_TYPELOAD (klass);
9909 if (sp [0]->type != STACK_OBJ)
9912 context_used = mini_class_check_context_used (cfg, klass);
9914 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9915 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9922 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9925 if (cfg->compile_aot)
9926 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9928 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9930 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9933 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9934 MonoMethod *mono_isinst;
9935 MonoInst *iargs [1];
9938 mono_isinst = mono_marshal_get_isinst (klass);
9941 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9942 iargs, ip, cfg->real_offset, TRUE, &bblock);
9943 CHECK_CFG_EXCEPTION;
9944 g_assert (costs > 0);
9947 cfg->real_offset += 5;
9951 inline_costs += costs;
9954 ins = handle_isinst (cfg, klass, *sp, context_used);
9955 CHECK_CFG_EXCEPTION;
9962 case CEE_UNBOX_ANY: {
9963 MonoInst *res, *addr;
9968 token = read32 (ip + 1);
9969 klass = mini_get_class (method, token, generic_context);
9970 CHECK_TYPELOAD (klass);
9972 mono_save_token_info (cfg, image, token, klass);
9974 context_used = mini_class_check_context_used (cfg, klass);
9976 if (mini_is_gsharedvt_klass (cfg, klass)) {
9977 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9979 } else if (generic_class_is_reference_type (cfg, klass)) {
9980 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9981 CHECK_CFG_EXCEPTION;
9982 } else if (mono_class_is_nullable (klass)) {
9983 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
9985 addr = handle_unbox (cfg, klass, sp, context_used);
9987 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10003 token = read32 (ip + 1);
10004 klass = mini_get_class (method, token, generic_context);
10005 CHECK_TYPELOAD (klass);
10007 mono_save_token_info (cfg, image, token, klass);
10009 context_used = mini_class_check_context_used (cfg, klass);
10011 if (generic_class_is_reference_type (cfg, klass)) {
10017 if (klass == mono_defaults.void_class)
10019 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10021 /* frequent check in generic code: box (struct), brtrue */
10023 // FIXME: LLVM can't handle the inconsistent bb linking
10024 if (!mono_class_is_nullable (klass) &&
10025 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10026 (ip [5] == CEE_BRTRUE ||
10027 ip [5] == CEE_BRTRUE_S ||
10028 ip [5] == CEE_BRFALSE ||
10029 ip [5] == CEE_BRFALSE_S)) {
10030 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10032 MonoBasicBlock *true_bb, *false_bb;
10036 if (cfg->verbose_level > 3) {
10037 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10038 printf ("<box+brtrue opt>\n");
10043 case CEE_BRFALSE_S:
10046 target = ip + 1 + (signed char)(*ip);
10053 target = ip + 4 + (gint)(read32 (ip));
10057 g_assert_not_reached ();
10061 * We need to link both bblocks, since it is needed for handling stack
10062 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10063 * Branching to only one of them would lead to inconsistencies, so
10064 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10066 GET_BBLOCK (cfg, true_bb, target);
10067 GET_BBLOCK (cfg, false_bb, ip);
10069 mono_link_bblock (cfg, cfg->cbb, true_bb);
10070 mono_link_bblock (cfg, cfg->cbb, false_bb);
10072 if (sp != stack_start) {
10073 handle_stack_args (cfg, stack_start, sp - stack_start);
10075 CHECK_UNVERIFIABLE (cfg);
10078 if (COMPILE_LLVM (cfg)) {
10079 dreg = alloc_ireg (cfg);
10080 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10083 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10085 /* The JIT can't eliminate the iconst+compare */
10086 MONO_INST_NEW (cfg, ins, OP_BR);
10087 ins->inst_target_bb = is_true ? true_bb : false_bb;
10088 MONO_ADD_INS (cfg->cbb, ins);
10091 start_new_bblock = 1;
10095 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10097 CHECK_CFG_EXCEPTION;
10106 token = read32 (ip + 1);
10107 klass = mini_get_class (method, token, generic_context);
10108 CHECK_TYPELOAD (klass);
10110 mono_save_token_info (cfg, image, token, klass);
10112 context_used = mini_class_check_context_used (cfg, klass);
10114 if (mono_class_is_nullable (klass)) {
10117 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10118 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10122 ins = handle_unbox (cfg, klass, sp, context_used);
10135 MonoClassField *field;
10136 #ifndef DISABLE_REMOTING
10140 gboolean is_instance;
10142 gpointer addr = NULL;
10143 gboolean is_special_static;
10145 MonoInst *store_val = NULL;
10146 MonoInst *thread_ins;
10149 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10151 if (op == CEE_STFLD) {
10154 store_val = sp [1];
10159 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10161 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10164 if (op == CEE_STSFLD) {
10167 store_val = sp [0];
10172 token = read32 (ip + 1);
10173 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10174 field = mono_method_get_wrapper_data (method, token);
10175 klass = field->parent;
10178 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10181 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10182 FIELD_ACCESS_FAILURE (method, field);
10183 mono_class_init (klass);
10185 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10188 /* if the class is Critical then transparent code cannot access its fields */
10189 if (!is_instance && mono_security_core_clr_enabled ())
10190 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10192 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10193 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10194 if (mono_security_core_clr_enabled ())
10195 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10199 * LDFLD etc. is usable on static fields as well, so convert those cases to
10202 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10214 g_assert_not_reached ();
10216 is_instance = FALSE;
10219 context_used = mini_class_check_context_used (cfg, klass);
10221 /* INSTANCE CASE */
10223 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10224 if (op == CEE_STFLD) {
10225 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10227 #ifndef DISABLE_REMOTING
10228 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10229 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10230 MonoInst *iargs [5];
10232 GSHAREDVT_FAILURE (op);
10234 iargs [0] = sp [0];
10235 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10236 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10237 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10239 iargs [4] = sp [1];
10241 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10242 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10243 iargs, ip, cfg->real_offset, TRUE, &bblock);
10244 CHECK_CFG_EXCEPTION;
10245 g_assert (costs > 0);
10247 cfg->real_offset += 5;
10249 inline_costs += costs;
10251 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10258 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10260 if (mini_is_gsharedvt_klass (cfg, klass)) {
10261 MonoInst *offset_ins;
10263 context_used = mini_class_check_context_used (cfg, klass);
10265 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10266 dreg = alloc_ireg_mp (cfg);
10267 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10268 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10269 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10271 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10273 if (sp [0]->opcode != OP_LDADDR)
10274 store->flags |= MONO_INST_FAULT;
10276 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10277 /* insert call to write barrier */
10281 dreg = alloc_ireg_mp (cfg);
10282 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10283 emit_write_barrier (cfg, ptr, sp [1]);
10286 store->flags |= ins_flag;
10293 #ifndef DISABLE_REMOTING
10294 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10295 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10296 MonoInst *iargs [4];
10298 GSHAREDVT_FAILURE (op);
10300 iargs [0] = sp [0];
10301 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10302 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10303 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10304 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10305 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10306 iargs, ip, cfg->real_offset, TRUE, &bblock);
10307 CHECK_CFG_EXCEPTION;
10308 g_assert (costs > 0);
10310 cfg->real_offset += 5;
10314 inline_costs += costs;
10316 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10322 if (sp [0]->type == STACK_VTYPE) {
10325 /* Have to compute the address of the variable */
10327 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10329 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10331 g_assert (var->klass == klass);
10333 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10337 if (op == CEE_LDFLDA) {
10338 if (is_magic_tls_access (field)) {
10339 GSHAREDVT_FAILURE (*ip);
10341 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10343 if (sp [0]->type == STACK_OBJ) {
10344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10345 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10348 dreg = alloc_ireg_mp (cfg);
10350 if (mini_is_gsharedvt_klass (cfg, klass)) {
10351 MonoInst *offset_ins;
10353 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10354 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10356 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10358 ins->klass = mono_class_from_mono_type (field->type);
10359 ins->type = STACK_MP;
10365 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10367 if (mini_is_gsharedvt_klass (cfg, klass)) {
10368 MonoInst *offset_ins;
10370 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10371 dreg = alloc_ireg_mp (cfg);
10372 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10373 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10375 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10377 load->flags |= ins_flag;
10378 if (sp [0]->opcode != OP_LDADDR)
10379 load->flags |= MONO_INST_FAULT;
10393 * We can only support shared generic static
10394 * field access on architectures where the
10395 * trampoline code has been extended to handle
10396 * the generic class init.
10398 #ifndef MONO_ARCH_VTABLE_REG
10399 GENERIC_SHARING_FAILURE (op);
10402 context_used = mini_class_check_context_used (cfg, klass);
10404 ftype = mono_field_get_type (field);
10406 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10409 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10410 * to be called here.
10412 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10413 mono_class_vtable (cfg->domain, klass);
10414 CHECK_TYPELOAD (klass);
10416 mono_domain_lock (cfg->domain);
10417 if (cfg->domain->special_static_fields)
10418 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10419 mono_domain_unlock (cfg->domain);
10421 is_special_static = mono_class_field_is_special_static (field);
10423 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10424 thread_ins = mono_get_thread_intrinsic (cfg);
10428 /* Generate IR to compute the field address */
10429 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10431 * Fast access to TLS data
10432 * Inline version of get_thread_static_data () in
10436 int idx, static_data_reg, array_reg, dreg;
10438 GSHAREDVT_FAILURE (op);
10440 // offset &= 0x7fffffff;
10441 // idx = (offset >> 24) - 1;
10442 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10443 MONO_ADD_INS (cfg->cbb, thread_ins);
10444 static_data_reg = alloc_ireg (cfg);
10445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10447 if (cfg->compile_aot) {
10448 int offset_reg, offset2_reg, idx_reg;
10450 /* For TLS variables, this will return the TLS offset */
10451 EMIT_NEW_SFLDACONST (cfg, ins, field);
10452 offset_reg = ins->dreg;
10453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10454 idx_reg = alloc_ireg (cfg);
10455 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10458 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10459 array_reg = alloc_ireg (cfg);
10460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10461 offset2_reg = alloc_ireg (cfg);
10462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10463 dreg = alloc_ireg (cfg);
10464 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10466 offset = (gsize)addr & 0x7fffffff;
10467 idx = (offset >> 24) - 1;
10469 array_reg = alloc_ireg (cfg);
10470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10471 dreg = alloc_ireg (cfg);
10472 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10474 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10475 (cfg->compile_aot && is_special_static) ||
10476 (context_used && is_special_static)) {
10477 MonoInst *iargs [2];
10479 g_assert (field->parent);
10480 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10481 if (context_used) {
10482 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10483 field, MONO_RGCTX_INFO_CLASS_FIELD);
10485 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10487 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10488 } else if (context_used) {
10489 MonoInst *static_data;
10492 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10493 method->klass->name_space, method->klass->name, method->name,
10494 depth, field->offset);
10497 if (mono_class_needs_cctor_run (klass, method))
10498 emit_generic_class_init (cfg, klass);
10501 * The pointer we're computing here is
10503 * super_info.static_data + field->offset
10505 static_data = emit_get_rgctx_klass (cfg, context_used,
10506 klass, MONO_RGCTX_INFO_STATIC_DATA);
10508 if (mini_is_gsharedvt_klass (cfg, klass)) {
10509 MonoInst *offset_ins;
10511 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10512 dreg = alloc_ireg_mp (cfg);
10513 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10514 } else if (field->offset == 0) {
10517 int addr_reg = mono_alloc_preg (cfg);
10518 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10520 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10521 MonoInst *iargs [2];
10523 g_assert (field->parent);
10524 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10525 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10526 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10528 MonoVTable *vtable = NULL;
10530 if (!cfg->compile_aot)
10531 vtable = mono_class_vtable (cfg->domain, klass);
10532 CHECK_TYPELOAD (klass);
10535 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10536 if (!(g_slist_find (class_inits, klass))) {
10537 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10538 if (cfg->verbose_level > 2)
10539 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10540 class_inits = g_slist_prepend (class_inits, klass);
10543 if (cfg->run_cctors) {
10545 /* This makes so that inline cannot trigger */
10546 /* .cctors: too many apps depend on them */
10547 /* running with a specific order... */
10549 if (! vtable->initialized)
10550 INLINE_FAILURE ("class init");
10551 ex = mono_runtime_class_init_full (vtable, FALSE);
10553 set_exception_object (cfg, ex);
10554 goto exception_exit;
10558 if (cfg->compile_aot)
10559 EMIT_NEW_SFLDACONST (cfg, ins, field);
10562 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10564 EMIT_NEW_PCONST (cfg, ins, addr);
10567 MonoInst *iargs [1];
10568 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10569 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10573 /* Generate IR to do the actual load/store operation */
10575 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10576 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10577 /* FIXME it's questionable if release semantics require a full barrier or just a StoreStore barrier */
10578 emit_memory_barrier (cfg, FullBarrier);
10581 if (op == CEE_LDSFLDA) {
10582 ins->klass = mono_class_from_mono_type (ftype);
10583 ins->type = STACK_PTR;
10585 } else if (op == CEE_STSFLD) {
10588 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10589 store->flags |= ins_flag;
10591 gboolean is_const = FALSE;
10592 MonoVTable *vtable = NULL;
10593 gpointer addr = NULL;
10595 if (!context_used) {
10596 vtable = mono_class_vtable (cfg->domain, klass);
10597 CHECK_TYPELOAD (klass);
10599 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10600 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10601 int ro_type = ftype->type;
10603 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10604 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10605 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10608 GSHAREDVT_FAILURE (op);
10610 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10613 case MONO_TYPE_BOOLEAN:
10615 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10619 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10622 case MONO_TYPE_CHAR:
10624 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10628 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10633 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10637 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10642 case MONO_TYPE_PTR:
10643 case MONO_TYPE_FNPTR:
10644 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10645 type_to_eval_stack_type ((cfg), field->type, *sp);
10648 case MONO_TYPE_STRING:
10649 case MONO_TYPE_OBJECT:
10650 case MONO_TYPE_CLASS:
10651 case MONO_TYPE_SZARRAY:
10652 case MONO_TYPE_ARRAY:
10653 if (!mono_gc_is_moving ()) {
10654 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10655 type_to_eval_stack_type ((cfg), field->type, *sp);
10663 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10668 case MONO_TYPE_VALUETYPE:
10678 CHECK_STACK_OVF (1);
10680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10681 load->flags |= ins_flag;
10687 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10688 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10689 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10690 emit_memory_barrier (cfg, FullBarrier);
10701 token = read32 (ip + 1);
10702 klass = mini_get_class (method, token, generic_context);
10703 CHECK_TYPELOAD (klass);
10704 if (ins_flag & MONO_INST_VOLATILE) {
10705 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10706 /* FIXME it's questionable if release semantics require a full barrier or just a StoreStore barrier */
10707 emit_memory_barrier (cfg, FullBarrier);
10709 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10710 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10711 ins->flags |= ins_flag;
10712 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10713 generic_class_is_reference_type (cfg, klass)) {
10714 /* insert call to write barrier */
10715 emit_write_barrier (cfg, sp [0], sp [1]);
10727 const char *data_ptr;
10729 guint32 field_token;
10735 token = read32 (ip + 1);
10737 klass = mini_get_class (method, token, generic_context);
10738 CHECK_TYPELOAD (klass);
10740 context_used = mini_class_check_context_used (cfg, klass);
10742 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10743 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10744 ins->sreg1 = sp [0]->dreg;
10745 ins->type = STACK_I4;
10746 ins->dreg = alloc_ireg (cfg);
10747 MONO_ADD_INS (cfg->cbb, ins);
10748 *sp = mono_decompose_opcode (cfg, ins);
10751 if (context_used) {
10752 MonoInst *args [3];
10753 MonoClass *array_class = mono_array_class_get (klass, 1);
10754 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10756 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10759 args [0] = emit_get_rgctx_klass (cfg, context_used,
10760 array_class, MONO_RGCTX_INFO_VTABLE);
10765 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10767 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10769 if (cfg->opt & MONO_OPT_SHARED) {
10770 /* Decompose now to avoid problems with references to the domainvar */
10771 MonoInst *iargs [3];
10773 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10774 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10775 iargs [2] = sp [0];
10777 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10779 /* Decompose later since it is needed by abcrem */
10780 MonoClass *array_type = mono_array_class_get (klass, 1);
10781 mono_class_vtable (cfg->domain, array_type);
10782 CHECK_TYPELOAD (array_type);
10784 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10785 ins->dreg = alloc_ireg_ref (cfg);
10786 ins->sreg1 = sp [0]->dreg;
10787 ins->inst_newa_class = klass;
10788 ins->type = STACK_OBJ;
10789 ins->klass = array_type;
10790 MONO_ADD_INS (cfg->cbb, ins);
10791 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10792 cfg->cbb->has_array_access = TRUE;
10794 /* Needed so mono_emit_load_get_addr () gets called */
10795 mono_get_got_var (cfg);
10805 * we inline/optimize the initialization sequence if possible.
10806 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10807 * for small sizes open code the memcpy
10808 * ensure the rva field is big enough
10810 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10811 MonoMethod *memcpy_method = get_memcpy_method ();
10812 MonoInst *iargs [3];
10813 int add_reg = alloc_ireg_mp (cfg);
10815 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10816 if (cfg->compile_aot) {
10817 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10819 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10821 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10822 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10831 if (sp [0]->type != STACK_OBJ)
10834 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10835 ins->dreg = alloc_preg (cfg);
10836 ins->sreg1 = sp [0]->dreg;
10837 ins->type = STACK_I4;
10838 /* This flag will be inherited by the decomposition */
10839 ins->flags |= MONO_INST_FAULT;
10840 MONO_ADD_INS (cfg->cbb, ins);
10841 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10842 cfg->cbb->has_array_access = TRUE;
10850 if (sp [0]->type != STACK_OBJ)
10853 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10855 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10856 CHECK_TYPELOAD (klass);
10857 /* we need to make sure that this array is exactly the type it needs
10858 * to be for correctness. the wrappers are lax with their usage
10859 * so we need to ignore them here
10861 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10862 MonoClass *array_class = mono_array_class_get (klass, 1);
10863 mini_emit_check_array_type (cfg, sp [0], array_class);
10864 CHECK_TYPELOAD (array_class);
10868 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10873 case CEE_LDELEM_I1:
10874 case CEE_LDELEM_U1:
10875 case CEE_LDELEM_I2:
10876 case CEE_LDELEM_U2:
10877 case CEE_LDELEM_I4:
10878 case CEE_LDELEM_U4:
10879 case CEE_LDELEM_I8:
10881 case CEE_LDELEM_R4:
10882 case CEE_LDELEM_R8:
10883 case CEE_LDELEM_REF: {
10889 if (*ip == CEE_LDELEM) {
10891 token = read32 (ip + 1);
10892 klass = mini_get_class (method, token, generic_context);
10893 CHECK_TYPELOAD (klass);
10894 mono_class_init (klass);
10897 klass = array_access_to_klass (*ip);
10899 if (sp [0]->type != STACK_OBJ)
10902 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10904 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10905 // FIXME-VT: OP_ICONST optimization
10906 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10907 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10908 ins->opcode = OP_LOADV_MEMBASE;
10909 } else if (sp [1]->opcode == OP_ICONST) {
10910 int array_reg = sp [0]->dreg;
10911 int index_reg = sp [1]->dreg;
10912 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10914 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10915 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10917 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10921 if (*ip == CEE_LDELEM)
10928 case CEE_STELEM_I1:
10929 case CEE_STELEM_I2:
10930 case CEE_STELEM_I4:
10931 case CEE_STELEM_I8:
10932 case CEE_STELEM_R4:
10933 case CEE_STELEM_R8:
10934 case CEE_STELEM_REF:
10939 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10941 if (*ip == CEE_STELEM) {
10943 token = read32 (ip + 1);
10944 klass = mini_get_class (method, token, generic_context);
10945 CHECK_TYPELOAD (klass);
10946 mono_class_init (klass);
10949 klass = array_access_to_klass (*ip);
10951 if (sp [0]->type != STACK_OBJ)
10954 emit_array_store (cfg, klass, sp, TRUE);
10956 if (*ip == CEE_STELEM)
10963 case CEE_CKFINITE: {
10967 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10968 ins->sreg1 = sp [0]->dreg;
10969 ins->dreg = alloc_freg (cfg);
10970 ins->type = STACK_R8;
10971 MONO_ADD_INS (bblock, ins);
10973 *sp++ = mono_decompose_opcode (cfg, ins);
10978 case CEE_REFANYVAL: {
10979 MonoInst *src_var, *src;
10981 int klass_reg = alloc_preg (cfg);
10982 int dreg = alloc_preg (cfg);
10984 GSHAREDVT_FAILURE (*ip);
10987 MONO_INST_NEW (cfg, ins, *ip);
10990 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10991 CHECK_TYPELOAD (klass);
10993 context_used = mini_class_check_context_used (cfg, klass);
10996 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10998 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10999 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11002 if (context_used) {
11003 MonoInst *klass_ins;
11005 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11006 klass, MONO_RGCTX_INFO_KLASS);
11009 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11010 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11012 mini_emit_class_check (cfg, klass_reg, klass);
11014 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11015 ins->type = STACK_MP;
11020 case CEE_MKREFANY: {
11021 MonoInst *loc, *addr;
11023 GSHAREDVT_FAILURE (*ip);
11026 MONO_INST_NEW (cfg, ins, *ip);
11029 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11030 CHECK_TYPELOAD (klass);
11032 context_used = mini_class_check_context_used (cfg, klass);
11034 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11035 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11037 if (context_used) {
11038 MonoInst *const_ins;
11039 int type_reg = alloc_preg (cfg);
11041 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11042 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11045 } else if (cfg->compile_aot) {
11046 int const_reg = alloc_preg (cfg);
11047 int type_reg = alloc_preg (cfg);
11049 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11050 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11051 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11054 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11055 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11057 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11059 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11060 ins->type = STACK_VTYPE;
11061 ins->klass = mono_defaults.typed_reference_class;
11066 case CEE_LDTOKEN: {
11068 MonoClass *handle_class;
11070 CHECK_STACK_OVF (1);
11073 n = read32 (ip + 1);
11075 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11076 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11077 handle = mono_method_get_wrapper_data (method, n);
11078 handle_class = mono_method_get_wrapper_data (method, n + 1);
11079 if (handle_class == mono_defaults.typehandle_class)
11080 handle = &((MonoClass*)handle)->byval_arg;
11083 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11087 mono_class_init (handle_class);
11088 if (cfg->generic_sharing_context) {
11089 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11090 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11091 /* This case handles ldtoken
11092 of an open type, like for
11095 } else if (handle_class == mono_defaults.typehandle_class) {
11096 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11097 } else if (handle_class == mono_defaults.fieldhandle_class)
11098 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11099 else if (handle_class == mono_defaults.methodhandle_class)
11100 context_used = mini_method_check_context_used (cfg, handle);
11102 g_assert_not_reached ();
11105 if ((cfg->opt & MONO_OPT_SHARED) &&
11106 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11107 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11108 MonoInst *addr, *vtvar, *iargs [3];
11109 int method_context_used;
11111 method_context_used = mini_method_check_context_used (cfg, method);
11113 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11115 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11116 EMIT_NEW_ICONST (cfg, iargs [1], n);
11117 if (method_context_used) {
11118 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11119 method, MONO_RGCTX_INFO_METHOD);
11120 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11122 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11123 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11125 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11127 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11129 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11131 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11132 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11133 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11134 (cmethod->klass == mono_defaults.systemtype_class) &&
11135 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11136 MonoClass *tclass = mono_class_from_mono_type (handle);
11138 mono_class_init (tclass);
11139 if (context_used) {
11140 ins = emit_get_rgctx_klass (cfg, context_used,
11141 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11142 } else if (cfg->compile_aot) {
11143 if (method->wrapper_type) {
11144 mono_error_init (&error); //got to do it since there are multiple conditionals below
11145 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11146 /* Special case for static synchronized wrappers */
11147 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11149 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11150 /* FIXME: n is not a normal token */
11152 EMIT_NEW_PCONST (cfg, ins, NULL);
11155 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11158 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11160 ins->type = STACK_OBJ;
11161 ins->klass = cmethod->klass;
11164 MonoInst *addr, *vtvar;
11166 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11168 if (context_used) {
11169 if (handle_class == mono_defaults.typehandle_class) {
11170 ins = emit_get_rgctx_klass (cfg, context_used,
11171 mono_class_from_mono_type (handle),
11172 MONO_RGCTX_INFO_TYPE);
11173 } else if (handle_class == mono_defaults.methodhandle_class) {
11174 ins = emit_get_rgctx_method (cfg, context_used,
11175 handle, MONO_RGCTX_INFO_METHOD);
11176 } else if (handle_class == mono_defaults.fieldhandle_class) {
11177 ins = emit_get_rgctx_field (cfg, context_used,
11178 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11180 g_assert_not_reached ();
11182 } else if (cfg->compile_aot) {
11183 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11185 EMIT_NEW_PCONST (cfg, ins, handle);
11187 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11188 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11189 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11199 MONO_INST_NEW (cfg, ins, OP_THROW);
11201 ins->sreg1 = sp [0]->dreg;
11203 bblock->out_of_line = TRUE;
11204 MONO_ADD_INS (bblock, ins);
11205 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11206 MONO_ADD_INS (bblock, ins);
11209 link_bblock (cfg, bblock, end_bblock);
11210 start_new_bblock = 1;
11212 case CEE_ENDFINALLY:
11213 /* mono_save_seq_point_info () depends on this */
11214 if (sp != stack_start)
11215 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11216 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11217 MONO_ADD_INS (bblock, ins);
11219 start_new_bblock = 1;
11222 * Control will leave the method so empty the stack, otherwise
11223 * the next basic block will start with a nonempty stack.
11225 while (sp != stack_start) {
11230 case CEE_LEAVE_S: {
11233 if (*ip == CEE_LEAVE) {
11235 target = ip + 5 + (gint32)read32(ip + 1);
11238 target = ip + 2 + (signed char)(ip [1]);
11241 /* empty the stack */
11242 while (sp != stack_start) {
11247 * If this leave statement is in a catch block, check for a
11248 * pending exception, and rethrow it if necessary.
11249 * We avoid doing this in runtime invoke wrappers, since those are called
11250 * by native code which expects the wrapper to catch all exceptions.
11252 for (i = 0; i < header->num_clauses; ++i) {
11253 MonoExceptionClause *clause = &header->clauses [i];
11256 * Use <= in the final comparison to handle clauses with multiple
11257 * leave statements, like in bug #78024.
11258 * The ordering of the exception clauses guarantees that we find the
11259 * innermost clause.
11261 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11263 MonoBasicBlock *dont_throw;
11268 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11271 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11273 NEW_BBLOCK (cfg, dont_throw);
11276 * Currently, we always rethrow the abort exception, despite the
11277 * fact that this is not correct. See thread6.cs for an example.
11278 * But propagating the abort exception is more important than
11279 * getting the semantics right.
11281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11283 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11285 MONO_START_BB (cfg, dont_throw);
11290 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11292 MonoExceptionClause *clause;
11294 for (tmp = handlers; tmp; tmp = tmp->next) {
11295 clause = tmp->data;
11296 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11298 link_bblock (cfg, bblock, tblock);
11299 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11300 ins->inst_target_bb = tblock;
11301 ins->inst_eh_block = clause;
11302 MONO_ADD_INS (bblock, ins);
11303 bblock->has_call_handler = 1;
11304 if (COMPILE_LLVM (cfg)) {
11305 MonoBasicBlock *target_bb;
11308 * Link the finally bblock with the target, since it will
11309 * conceptually branch there.
11310 * FIXME: Have to link the bblock containing the endfinally.
11312 GET_BBLOCK (cfg, target_bb, target);
11313 link_bblock (cfg, tblock, target_bb);
11316 g_list_free (handlers);
11319 MONO_INST_NEW (cfg, ins, OP_BR);
11320 MONO_ADD_INS (bblock, ins);
11321 GET_BBLOCK (cfg, tblock, target);
11322 link_bblock (cfg, bblock, tblock);
11323 ins->inst_target_bb = tblock;
11324 start_new_bblock = 1;
11326 if (*ip == CEE_LEAVE)
11335 * Mono specific opcodes
11337 case MONO_CUSTOM_PREFIX: {
11339 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11343 case CEE_MONO_ICALL: {
11345 MonoJitICallInfo *info;
11347 token = read32 (ip + 2);
11348 func = mono_method_get_wrapper_data (method, token);
11349 info = mono_find_jit_icall_by_addr (func);
11351 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11354 CHECK_STACK (info->sig->param_count);
11355 sp -= info->sig->param_count;
11357 ins = mono_emit_jit_icall (cfg, info->func, sp);
11358 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11362 inline_costs += 10 * num_calls++;
11366 case CEE_MONO_LDPTR: {
11369 CHECK_STACK_OVF (1);
11371 token = read32 (ip + 2);
11373 ptr = mono_method_get_wrapper_data (method, token);
11374 /* FIXME: Generalize this */
11375 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11376 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11381 EMIT_NEW_PCONST (cfg, ins, ptr);
11384 inline_costs += 10 * num_calls++;
11385 /* Can't embed random pointers into AOT code */
11389 case CEE_MONO_JIT_ICALL_ADDR: {
11390 MonoJitICallInfo *callinfo;
11393 CHECK_STACK_OVF (1);
11395 token = read32 (ip + 2);
11397 ptr = mono_method_get_wrapper_data (method, token);
11398 callinfo = mono_find_jit_icall_by_addr (ptr);
11399 g_assert (callinfo);
11400 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11403 inline_costs += 10 * num_calls++;
11406 case CEE_MONO_ICALL_ADDR: {
11407 MonoMethod *cmethod;
11410 CHECK_STACK_OVF (1);
11412 token = read32 (ip + 2);
11414 cmethod = mono_method_get_wrapper_data (method, token);
11416 if (cfg->compile_aot) {
11417 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11419 ptr = mono_lookup_internal_call (cmethod);
11421 EMIT_NEW_PCONST (cfg, ins, ptr);
11427 case CEE_MONO_VTADDR: {
11428 MonoInst *src_var, *src;
11434 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11435 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11440 case CEE_MONO_NEWOBJ: {
11441 MonoInst *iargs [2];
11443 CHECK_STACK_OVF (1);
11445 token = read32 (ip + 2);
11446 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11447 mono_class_init (klass);
11448 NEW_DOMAINCONST (cfg, iargs [0]);
11449 MONO_ADD_INS (cfg->cbb, iargs [0]);
11450 NEW_CLASSCONST (cfg, iargs [1], klass);
11451 MONO_ADD_INS (cfg->cbb, iargs [1]);
11452 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11454 inline_costs += 10 * num_calls++;
11457 case CEE_MONO_OBJADDR:
11460 MONO_INST_NEW (cfg, ins, OP_MOVE);
11461 ins->dreg = alloc_ireg_mp (cfg);
11462 ins->sreg1 = sp [0]->dreg;
11463 ins->type = STACK_MP;
11464 MONO_ADD_INS (cfg->cbb, ins);
11468 case CEE_MONO_LDNATIVEOBJ:
11470 * Similar to LDOBJ, but instead load the unmanaged
11471 * representation of the vtype to the stack.
11476 token = read32 (ip + 2);
11477 klass = mono_method_get_wrapper_data (method, token);
11478 g_assert (klass->valuetype);
11479 mono_class_init (klass);
11482 MonoInst *src, *dest, *temp;
11485 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11486 temp->backend.is_pinvoke = 1;
11487 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11488 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11490 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11491 dest->type = STACK_VTYPE;
11492 dest->klass = klass;
11498 case CEE_MONO_RETOBJ: {
11500 * Same as RET, but return the native representation of a vtype
11503 g_assert (cfg->ret);
11504 g_assert (mono_method_signature (method)->pinvoke);
11509 token = read32 (ip + 2);
11510 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11512 if (!cfg->vret_addr) {
11513 g_assert (cfg->ret_var_is_local);
11515 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11517 EMIT_NEW_RETLOADA (cfg, ins);
11519 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11521 if (sp != stack_start)
11524 MONO_INST_NEW (cfg, ins, OP_BR);
11525 ins->inst_target_bb = end_bblock;
11526 MONO_ADD_INS (bblock, ins);
11527 link_bblock (cfg, bblock, end_bblock);
11528 start_new_bblock = 1;
11532 case CEE_MONO_CISINST:
11533 case CEE_MONO_CCASTCLASS: {
11538 token = read32 (ip + 2);
11539 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11540 if (ip [1] == CEE_MONO_CISINST)
11541 ins = handle_cisinst (cfg, klass, sp [0]);
11543 ins = handle_ccastclass (cfg, klass, sp [0]);
11549 case CEE_MONO_SAVE_LMF:
11550 case CEE_MONO_RESTORE_LMF:
11551 #ifdef MONO_ARCH_HAVE_LMF_OPS
11552 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11553 MONO_ADD_INS (bblock, ins);
11554 cfg->need_lmf_area = TRUE;
11558 case CEE_MONO_CLASSCONST:
11559 CHECK_STACK_OVF (1);
11561 token = read32 (ip + 2);
11562 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11565 inline_costs += 10 * num_calls++;
11567 case CEE_MONO_NOT_TAKEN:
11568 bblock->out_of_line = TRUE;
11571 case CEE_MONO_TLS: {
11574 CHECK_STACK_OVF (1);
11576 key = (gint32)read32 (ip + 2);
11577 g_assert (key < TLS_KEY_NUM);
11579 ins = mono_create_tls_get (cfg, key);
11581 if (cfg->compile_aot) {
11583 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11584 ins->dreg = alloc_preg (cfg);
11585 ins->type = STACK_PTR;
11587 g_assert_not_reached ();
11590 ins->type = STACK_PTR;
11591 MONO_ADD_INS (bblock, ins);
11596 case CEE_MONO_DYN_CALL: {
11597 MonoCallInst *call;
11599 /* It would be easier to call a trampoline, but that would put an
11600 * extra frame on the stack, confusing exception handling. So
11601 * implement it inline using an opcode for now.
11604 if (!cfg->dyn_call_var) {
11605 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11606 /* prevent it from being register allocated */
11607 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12610 /* Has to use a call inst since the local regalloc expects it */
11611 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11612 ins = (MonoInst*)call;
11614 ins->sreg1 = sp [0]->dreg;
11615 ins->sreg2 = sp [1]->dreg;
11616 MONO_ADD_INS (bblock, ins);
11618 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11621 inline_costs += 10 * num_calls++;
11625 case CEE_MONO_MEMORY_BARRIER: {
11627 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11631 case CEE_MONO_JIT_ATTACH: {
11632 MonoInst *args [16], *domain_ins;
11633 MonoInst *ad_ins, *jit_tls_ins;
11634 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11636 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11638 EMIT_NEW_PCONST (cfg, ins, NULL);
11639 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11641 ad_ins = mono_get_domain_intrinsic (cfg);
11642 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11644 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11645 NEW_BBLOCK (cfg, next_bb);
11646 NEW_BBLOCK (cfg, call_bb);
11648 if (cfg->compile_aot) {
11649 /* AOT code is only used in the root domain */
11650 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11652 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11654 MONO_ADD_INS (cfg->cbb, ad_ins);
11655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11658 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11663 MONO_START_BB (cfg, call_bb);
11666 if (cfg->compile_aot) {
11667 /* AOT code is only used in the root domain */
11668 EMIT_NEW_PCONST (cfg, args [0], NULL);
11670 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11672 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11673 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11676 MONO_START_BB (cfg, next_bb);
11682 case CEE_MONO_JIT_DETACH: {
11683 MonoInst *args [16];
11685 /* Restore the original domain */
11686 dreg = alloc_ireg (cfg);
11687 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11688 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11693 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11699 case CEE_PREFIX1: {
11702 case CEE_ARGLIST: {
11703 /* somewhat similar to LDTOKEN */
11704 MonoInst *addr, *vtvar;
11705 CHECK_STACK_OVF (1);
11706 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11708 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11709 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11711 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11712 ins->type = STACK_VTYPE;
11713 ins->klass = mono_defaults.argumenthandle_class;
11726 * The following transforms:
11727 * CEE_CEQ into OP_CEQ
11728 * CEE_CGT into OP_CGT
11729 * CEE_CGT_UN into OP_CGT_UN
11730 * CEE_CLT into OP_CLT
11731 * CEE_CLT_UN into OP_CLT_UN
11733 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11735 MONO_INST_NEW (cfg, ins, cmp->opcode);
11737 cmp->sreg1 = sp [0]->dreg;
11738 cmp->sreg2 = sp [1]->dreg;
11739 type_from_op (cmp, sp [0], sp [1]);
11741 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11742 cmp->opcode = OP_LCOMPARE;
11743 else if (sp [0]->type == STACK_R8)
11744 cmp->opcode = OP_FCOMPARE;
11746 cmp->opcode = OP_ICOMPARE;
11747 MONO_ADD_INS (bblock, cmp);
11748 ins->type = STACK_I4;
11749 ins->dreg = alloc_dreg (cfg, ins->type);
11750 type_from_op (ins, sp [0], sp [1]);
11752 if (cmp->opcode == OP_FCOMPARE) {
11754 * The backends expect the fceq opcodes to do the
11757 ins->sreg1 = cmp->sreg1;
11758 ins->sreg2 = cmp->sreg2;
11761 MONO_ADD_INS (bblock, ins);
11767 MonoInst *argconst;
11768 MonoMethod *cil_method;
11770 CHECK_STACK_OVF (1);
11772 n = read32 (ip + 2);
11773 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11774 if (!cmethod || mono_loader_get_last_error ())
11776 mono_class_init (cmethod->klass);
11778 mono_save_token_info (cfg, image, n, cmethod);
11780 context_used = mini_method_check_context_used (cfg, cmethod);
11782 cil_method = cmethod;
11783 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11784 METHOD_ACCESS_FAILURE (method, cil_method);
11786 if (mono_security_cas_enabled ()) {
11787 if (check_linkdemand (cfg, method, cmethod))
11788 INLINE_FAILURE ("linkdemand");
11789 CHECK_CFG_EXCEPTION;
11790 } else if (mono_security_core_clr_enabled ()) {
11791 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11795 * Optimize the common case of ldftn+delegate creation
11797 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11798 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11799 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11800 MonoInst *target_ins, *handle_ins;
11801 MonoMethod *invoke;
11802 int invoke_context_used;
11804 invoke = mono_get_delegate_invoke (ctor_method->klass);
11805 if (!invoke || !mono_method_signature (invoke))
11808 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11810 target_ins = sp [-1];
11812 if (mono_security_core_clr_enabled ())
11813 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11815 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11816 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11817 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11818 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11819 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11823 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11824 /* FIXME: SGEN support */
11825 if (invoke_context_used == 0) {
11827 if (cfg->verbose_level > 3)
11828 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11829 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11832 CHECK_CFG_EXCEPTION;
11843 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11844 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11848 inline_costs += 10 * num_calls++;
11851 case CEE_LDVIRTFTN: {
11852 MonoInst *args [2];
11856 n = read32 (ip + 2);
11857 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11858 if (!cmethod || mono_loader_get_last_error ())
11860 mono_class_init (cmethod->klass);
11862 context_used = mini_method_check_context_used (cfg, cmethod);
11864 if (mono_security_cas_enabled ()) {
11865 if (check_linkdemand (cfg, method, cmethod))
11866 INLINE_FAILURE ("linkdemand");
11867 CHECK_CFG_EXCEPTION;
11868 } else if (mono_security_core_clr_enabled ()) {
11869 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11873 * Optimize the common case of ldvirtftn+delegate creation
11875 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11876 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11877 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11878 MonoInst *target_ins, *handle_ins;
11879 MonoMethod *invoke;
11880 int invoke_context_used;
11882 invoke = mono_get_delegate_invoke (ctor_method->klass);
11883 if (!invoke || !mono_method_signature (invoke))
11886 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11888 target_ins = sp [-1];
11890 if (mono_security_core_clr_enabled ())
11891 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11893 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11894 /* FIXME: SGEN support */
11895 if (invoke_context_used == 0) {
11897 if (cfg->verbose_level > 3)
11898 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11899 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11902 CHECK_CFG_EXCEPTION;
11916 args [1] = emit_get_rgctx_method (cfg, context_used,
11917 cmethod, MONO_RGCTX_INFO_METHOD);
11920 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11922 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11925 inline_costs += 10 * num_calls++;
11929 CHECK_STACK_OVF (1);
11931 n = read16 (ip + 2);
11933 EMIT_NEW_ARGLOAD (cfg, ins, n);
11938 CHECK_STACK_OVF (1);
11940 n = read16 (ip + 2);
11942 NEW_ARGLOADA (cfg, ins, n);
11943 MONO_ADD_INS (cfg->cbb, ins);
11951 n = read16 (ip + 2);
11953 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11955 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11959 CHECK_STACK_OVF (1);
11961 n = read16 (ip + 2);
11963 EMIT_NEW_LOCLOAD (cfg, ins, n);
11968 unsigned char *tmp_ip;
11969 CHECK_STACK_OVF (1);
11971 n = read16 (ip + 2);
11974 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11980 EMIT_NEW_LOCLOADA (cfg, ins, n);
11989 n = read16 (ip + 2);
11991 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11993 emit_stloc_ir (cfg, sp, header, n);
12000 if (sp != stack_start)
12002 if (cfg->method != method)
12004 * Inlining this into a loop in a parent could lead to
12005 * stack overflows which is different behavior than the
12006 * non-inlined case, thus disable inlining in this case.
12008 INLINE_FAILURE("localloc");
12010 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12011 ins->dreg = alloc_preg (cfg);
12012 ins->sreg1 = sp [0]->dreg;
12013 ins->type = STACK_PTR;
12014 MONO_ADD_INS (cfg->cbb, ins);
12016 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12018 ins->flags |= MONO_INST_INIT;
12023 case CEE_ENDFILTER: {
12024 MonoExceptionClause *clause, *nearest;
12025 int cc, nearest_num;
12029 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12031 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12032 ins->sreg1 = (*sp)->dreg;
12033 MONO_ADD_INS (bblock, ins);
12034 start_new_bblock = 1;
12039 for (cc = 0; cc < header->num_clauses; ++cc) {
12040 clause = &header->clauses [cc];
12041 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12042 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12043 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12048 g_assert (nearest);
12049 if ((ip - header->code) != nearest->handler_offset)
12054 case CEE_UNALIGNED_:
12055 ins_flag |= MONO_INST_UNALIGNED;
12056 /* FIXME: record alignment? we can assume 1 for now */
12060 case CEE_VOLATILE_:
12061 ins_flag |= MONO_INST_VOLATILE;
12065 ins_flag |= MONO_INST_TAILCALL;
12066 cfg->flags |= MONO_CFG_HAS_TAIL;
12067 /* Can't inline tail calls at this time */
12068 inline_costs += 100000;
12075 token = read32 (ip + 2);
12076 klass = mini_get_class (method, token, generic_context);
12077 CHECK_TYPELOAD (klass);
12078 if (generic_class_is_reference_type (cfg, klass))
12079 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12081 mini_emit_initobj (cfg, *sp, NULL, klass);
12085 case CEE_CONSTRAINED_:
12087 token = read32 (ip + 2);
12088 constrained_call = mini_get_class (method, token, generic_context);
12089 CHECK_TYPELOAD (constrained_call);
12093 case CEE_INITBLK: {
12094 MonoInst *iargs [3];
12098 /* Skip optimized paths for volatile operations. */
12099 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12100 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12101 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12102 /* emit_memset only works when val == 0 */
12103 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12106 iargs [0] = sp [0];
12107 iargs [1] = sp [1];
12108 iargs [2] = sp [2];
12109 if (ip [1] == CEE_CPBLK) {
12111 * FIXME: It's unclear whether we should be emitting both the acquire
12112 * and release barriers for cpblk. It is technically both a load and
12113 * store operation, so it seems like that's the sensible thing to do.
12115 MonoMethod *memcpy_method = get_memcpy_method ();
12116 if (ins_flag & MONO_INST_VOLATILE) {
12117 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12118 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12119 emit_memory_barrier (cfg, FullBarrier);
12121 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12122 call->flags |= ins_flag;
12123 if (ins_flag & MONO_INST_VOLATILE) {
12124 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12125 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12126 emit_memory_barrier (cfg, FullBarrier);
12129 MonoMethod *memset_method = get_memset_method ();
12130 if (ins_flag & MONO_INST_VOLATILE) {
12131 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12132 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12133 emit_memory_barrier (cfg, FullBarrier);
12135 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12136 call->flags |= ins_flag;
12147 ins_flag |= MONO_INST_NOTYPECHECK;
12149 ins_flag |= MONO_INST_NORANGECHECK;
12150 /* we ignore the no-nullcheck for now since we
12151 * really do it explicitly only when doing callvirt->call
12155 case CEE_RETHROW: {
12157 int handler_offset = -1;
12159 for (i = 0; i < header->num_clauses; ++i) {
12160 MonoExceptionClause *clause = &header->clauses [i];
12161 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12162 handler_offset = clause->handler_offset;
12167 bblock->flags |= BB_EXCEPTION_UNSAFE;
12169 if (handler_offset == -1)
12172 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12173 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12174 ins->sreg1 = load->dreg;
12175 MONO_ADD_INS (bblock, ins);
12177 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12178 MONO_ADD_INS (bblock, ins);
12181 link_bblock (cfg, bblock, end_bblock);
12182 start_new_bblock = 1;
12190 CHECK_STACK_OVF (1);
12192 token = read32 (ip + 2);
12193 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12194 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12197 val = mono_type_size (type, &ialign);
12199 MonoClass *klass = mini_get_class (method, token, generic_context);
12200 CHECK_TYPELOAD (klass);
12202 val = mono_type_size (&klass->byval_arg, &ialign);
12204 if (mini_is_gsharedvt_klass (cfg, klass))
12205 GSHAREDVT_FAILURE (*ip);
12207 EMIT_NEW_ICONST (cfg, ins, val);
12212 case CEE_REFANYTYPE: {
12213 MonoInst *src_var, *src;
12215 GSHAREDVT_FAILURE (*ip);
12221 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12223 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12224 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12225 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12230 case CEE_READONLY_:
12243 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12253 g_warning ("opcode 0x%02x not handled", *ip);
12257 if (start_new_bblock != 1)
12260 bblock->cil_length = ip - bblock->cil_code;
12261 if (bblock->next_bb) {
12262 /* This could already be set because of inlining, #693905 */
12263 MonoBasicBlock *bb = bblock;
12265 while (bb->next_bb)
12267 bb->next_bb = end_bblock;
12269 bblock->next_bb = end_bblock;
12272 if (cfg->method == method && cfg->domainvar) {
12274 MonoInst *get_domain;
12276 cfg->cbb = init_localsbb;
12278 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12279 MONO_ADD_INS (cfg->cbb, get_domain);
12281 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12283 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12284 MONO_ADD_INS (cfg->cbb, store);
12287 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12288 if (cfg->compile_aot)
12289 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12290 mono_get_got_var (cfg);
12293 if (cfg->method == method && cfg->got_var)
12294 mono_emit_load_got_addr (cfg);
12296 if (init_localsbb) {
12297 cfg->cbb = init_localsbb;
12299 for (i = 0; i < header->num_locals; ++i) {
12300 emit_init_local (cfg, i, header->locals [i], init_locals);
12304 if (cfg->init_ref_vars && cfg->method == method) {
12305 /* Emit initialization for ref vars */
12306 // FIXME: Avoid duplication initialization for IL locals.
12307 for (i = 0; i < cfg->num_varinfo; ++i) {
12308 MonoInst *ins = cfg->varinfo [i];
12310 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12311 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12315 if (cfg->lmf_var && cfg->method == method) {
12316 cfg->cbb = init_localsbb;
12317 emit_push_lmf (cfg);
12320 cfg->cbb = init_localsbb;
12321 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12324 MonoBasicBlock *bb;
12327 * Make seq points at backward branch targets interruptable.
12329 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12330 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12331 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12334 /* Add a sequence point for method entry/exit events */
12336 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12337 MONO_ADD_INS (init_localsbb, ins);
12338 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12339 MONO_ADD_INS (cfg->bb_exit, ins);
12343 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12344 * the code they refer to was dead (#11880).
12346 if (sym_seq_points) {
12347 for (i = 0; i < header->code_size; ++i) {
12348 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12351 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12352 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12359 if (cfg->method == method) {
12360 MonoBasicBlock *bb;
12361 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12362 bb->region = mono_find_block_region (cfg, bb->real_offset);
12364 mono_create_spvar_for_region (cfg, bb->region);
12365 if (cfg->verbose_level > 2)
12366 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12370 if (inline_costs < 0) {
12373 /* Method is too large */
12374 mname = mono_method_full_name (method, TRUE);
12375 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12376 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12380 if ((cfg->verbose_level > 2) && (cfg->method == method))
12381 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12386 g_assert (!mono_error_ok (&cfg->error));
12390 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12394 set_exception_type_from_invalid_il (cfg, method, ip);
12398 g_slist_free (class_inits);
12399 mono_basic_block_free (original_bb);
12400 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12401 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12402 if (cfg->exception_type)
12405 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the _IMM variant of a STOREx_MEMBASE_REG opcode, i.e. the form
 * that stores an immediate value instead of a register source.  Any opcode
 * without an immediate counterpart hits g_assert_not_reached ().
 * NOTE(review): this view of the file is sampled — the switch braces and
 * some lines are elided here; code lines below are kept byte-identical.
 */
12409 store_membase_reg_to_store_membase_imm (int opcode)
12412 case OP_STORE_MEMBASE_REG:
12413 return OP_STORE_MEMBASE_IMM;
12414 case OP_STOREI1_MEMBASE_REG:
12415 return OP_STOREI1_MEMBASE_IMM;
12416 case OP_STOREI2_MEMBASE_REG:
12417 return OP_STOREI2_MEMBASE_IMM;
12418 case OP_STOREI4_MEMBASE_REG:
12419 return OP_STOREI4_MEMBASE_IMM;
12420 case OP_STOREI8_MEMBASE_REG:
12421 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for this opcode */
12423 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand (_IMM) variant of OPCODE, used by the
 * peephole/local optimizations to fold a constant source operand directly
 * into the instruction.
 * NOTE(review): the matching `case` labels and the fallback return are
 * elided in this sampled view; groupings below are inferred from the
 * return values — confirm against the full file.
 */
12430 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
12434 return OP_IADD_IMM;
12436 return OP_ISUB_IMM;
12438 return OP_IDIV_IMM;
12440 return OP_IDIV_UN_IMM;
12442 return OP_IREM_IMM;
12444 return OP_IREM_UN_IMM;
12446 return OP_IMUL_IMM;
12448 return OP_IAND_IMM;
12452 return OP_IXOR_IMM;
12454 return OP_ISHL_IMM;
12456 return OP_ISHR_IMM;
12458 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU ops */
12461 return OP_LADD_IMM;
12463 return OP_LSUB_IMM;
12465 return OP_LAND_IMM;
12469 return OP_LXOR_IMM;
12471 return OP_LSHL_IMM;
12473 return OP_LSHR_IMM;
12475 return OP_LSHR_UN_IMM;
/* Long division/remainder only has an immediate form on 64-bit registers */
12476 #if SIZEOF_REGISTER == 8
12478 return OP_LREM_IMM;
/* Compares */
12482 return OP_COMPARE_IMM;
12484 return OP_ICOMPARE_IMM;
12486 return OP_LCOMPARE_IMM;
/* Register stores -> immediate stores */
12488 case OP_STORE_MEMBASE_REG:
12489 return OP_STORE_MEMBASE_IMM;
12490 case OP_STOREI1_MEMBASE_REG:
12491 return OP_STOREI1_MEMBASE_IMM;
12492 case OP_STOREI2_MEMBASE_REG:
12493 return OP_STOREI2_MEMBASE_IMM;
12494 case OP_STOREI4_MEMBASE_REG:
12495 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes */
12497 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12499 return OP_X86_PUSH_IMM;
12500 case OP_X86_COMPARE_MEMBASE_REG:
12501 return OP_X86_COMPARE_MEMBASE_IMM;
12503 #if defined(TARGET_AMD64)
12504 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12505 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant address needs no register operand */
12507 case OP_VOIDCALL_REG:
12508 return OP_VOIDCALL;
12516 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding JIT
 * OP_LOADx_MEMBASE opcode.  Note that both the native-int and the object
 * reference loads (CEE_LDIND_REF) map to the pointer-sized OP_LOAD_MEMBASE.
 * Asserts on any opcode that is not an ldind variant.
 * NOTE(review): most `case` labels are elided in this sampled view; code
 * lines are kept byte-identical.
 */
12523 ldind_to_load_membase (int opcode)
12527 return OP_LOADI1_MEMBASE;
12529 return OP_LOADU1_MEMBASE;
12531 return OP_LOADI2_MEMBASE;
12533 return OP_LOADU2_MEMBASE;
12535 return OP_LOADI4_MEMBASE;
12537 return OP_LOADU4_MEMBASE;
12539 return OP_LOAD_MEMBASE;
12540 case CEE_LDIND_REF:
12541 return OP_LOAD_MEMBASE;
12543 return OP_LOADI8_MEMBASE;
12545 return OP_LOADR4_MEMBASE;
12547 return OP_LOADR8_MEMBASE;
12549 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding JIT
 * OP_STOREx_MEMBASE_REG opcode.  CEE_STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on any opcode that is not a stind variant.
 * NOTE(review): most `case` labels are elided in this sampled view; code
 * lines are kept byte-identical.
 */
12556 stind_to_store_membase (int opcode)
12560 return OP_STOREI1_MEMBASE_REG;
12562 return OP_STOREI2_MEMBASE_REG;
12564 return OP_STOREI4_MEMBASE_REG;
12566 case CEE_STIND_REF:
12567 return OP_STORE_MEMBASE_REG;
12569 return OP_STOREI8_MEMBASE_REG;
12571 return OP_STORER4_MEMBASE_REG;
12573 return OP_STORER8_MEMBASE_REG;
12575 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map an OP_LOADx_MEMBASE (base register + displacement)
 * load to the corresponding OP_LOADx_MEM form which loads from an absolute
 * address, so a constant base can be folded into the instruction.  The
 * 64-bit load is only available when registers are 8 bytes wide.
 * NOTE(review): the fallback path for other architectures/opcodes is
 * elided in this sampled view.
 */
12582 mono_load_membase_to_load_mem (int opcode)
12584 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12585 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12587 case OP_LOAD_MEMBASE:
12588 return OP_LOAD_MEM;
12589 case OP_LOADU1_MEMBASE:
12590 return OP_LOADU1_MEM;
12591 case OP_LOADU2_MEMBASE:
12592 return OP_LOADU2_MEM;
12593 case OP_LOADI4_MEMBASE:
12594 return OP_LOADI4_MEM;
12595 case OP_LOADU4_MEMBASE:
12596 return OP_LOADU4_MEM;
12597 #if SIZEOF_REGISTER == 8
12598 case OP_LOADI8_MEMBASE:
12599 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored to memory with
 * STORE_OPCODE, return the x86/amd64 read-modify-write _MEMBASE variant
 * which performs the operation directly on the memory operand (e.g.
 * add [basereg+disp], reg).  Only applies when the store is one of the
 * plain register store-membase forms checked below.
 * NOTE(review): many `case` labels, the `switch` braces and the fallback
 * return are elided in this sampled view; groupings are inferred from the
 * returned opcodes — confirm against the full file.
 */
12608 op_to_op_dest_membase (int store_opcode, int opcode)
/* 32-bit x86: only pointer-sized / 4-byte stores can be fused */
12610 #if defined(TARGET_X86)
12611 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source RMW forms */
12616 return OP_X86_ADD_MEMBASE_REG;
12618 return OP_X86_SUB_MEMBASE_REG;
12620 return OP_X86_AND_MEMBASE_REG;
12622 return OP_X86_OR_MEMBASE_REG;
12624 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source RMW forms */
12627 return OP_X86_ADD_MEMBASE_IMM;
12630 return OP_X86_SUB_MEMBASE_IMM;
12633 return OP_X86_AND_MEMBASE_IMM;
12636 return OP_X86_OR_MEMBASE_IMM;
12639 return OP_X86_XOR_MEMBASE_IMM;
/* amd64: 8-byte stores can be fused as well */
12645 #if defined(TARGET_AMD64)
12646 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit operations reuse the x86 opcodes */
12651 return OP_X86_ADD_MEMBASE_REG;
12653 return OP_X86_SUB_MEMBASE_REG;
12655 return OP_X86_AND_MEMBASE_REG;
12657 return OP_X86_OR_MEMBASE_REG;
12659 return OP_X86_XOR_MEMBASE_REG;
12661 return OP_X86_ADD_MEMBASE_IMM;
12663 return OP_X86_SUB_MEMBASE_IMM;
12665 return OP_X86_AND_MEMBASE_IMM;
12667 return OP_X86_OR_MEMBASE_IMM;
12669 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit operations use the AMD64-specific opcodes */
12671 return OP_AMD64_ADD_MEMBASE_REG;
12673 return OP_AMD64_SUB_MEMBASE_REG;
12675 return OP_AMD64_AND_MEMBASE_REG;
12677 return OP_AMD64_OR_MEMBASE_REG;
12679 return OP_AMD64_XOR_MEMBASE_REG;
12682 return OP_AMD64_ADD_MEMBASE_IMM;
12685 return OP_AMD64_SUB_MEMBASE_IMM;
12688 return OP_AMD64_AND_MEMBASE_IMM;
12691 return OP_AMD64_OR_MEMBASE_IMM;
12694 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode (SETcc) with a following byte
 * store into a single OP_X86_SET{EQ,NE}_MEMBASE; only valid for 1-byte
 * stores, hence the OP_STOREI1_MEMBASE_REG guards.  -1 presumably means
 * "no fused form" — confirm against the elided fallthrough.
 */
12704 op_to_op_store_membase (int store_opcode, int opcode)
12706 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12709 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12710 return OP_X86_SETEQ_MEMBASE;
12712 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12713 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a variable load feeding the FIRST source operand of an
 * instruction into a memory-operand form of that instruction (push,
 * compare), avoiding the explicit load.  Only loads whose width matches
 * the operation are eligible, hence the load_opcode guards; -1 presumably
 * means "no fused form".
 */
12721 op_to_op_src1_membase (int load_opcode, int opcode)
12724 /* FIXME: This has sign extension issues */
12726 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12727 return OP_X86_COMPARE_MEMBASE8_IMM;
12730 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12735 return OP_X86_PUSH_MEMBASE;
12736 case OP_COMPARE_IMM:
12737 case OP_ICOMPARE_IMM:
12738 return OP_X86_COMPARE_MEMBASE_IMM;
12741 return OP_X86_COMPARE_MEMBASE_REG;
12745 #ifdef TARGET_AMD64
12746 /* FIXME: This has sign extension issues */
12748 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12749 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) pointers are 4 bytes, so OP_LOAD_MEMBASE pairs with
 * the 32-bit compare forms and OP_LOADI8_MEMBASE is excluded. */
12754 #ifdef __mono_ilp32__
12755 if (load_opcode == OP_LOADI8_MEMBASE)
12757 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12759 return OP_X86_PUSH_MEMBASE;
12761 /* FIXME: This only works for 32 bit immediates
12762 case OP_COMPARE_IMM:
12763 case OP_LCOMPARE_IMM:
12764 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12765 return OP_AMD64_COMPARE_MEMBASE_IMM;
12767 case OP_ICOMPARE_IMM:
12768 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12769 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12773 #ifdef __mono_ilp32__
12774 if (load_opcode == OP_LOAD_MEMBASE)
12775 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12776 if (load_opcode == OP_LOADI8_MEMBASE)
12778 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12780 return OP_AMD64_COMPARE_MEMBASE_REG;
12783 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12784 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a variable load feeding the SECOND source operand of a binary
 * instruction (compare/add/sub/and/or/xor) into the reg,mem form of that
 * instruction.  The load width selects between the 32-bit (OP_X86_*) and
 * 64-bit (OP_AMD64_*) families; -1 presumably means "no fused form".
 */
12793 op_to_op_src2_membase (int load_opcode, int opcode)
12796 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12802 return OP_X86_COMPARE_REG_MEMBASE;
12804 return OP_X86_ADD_REG_MEMBASE;
12806 return OP_X86_SUB_REG_MEMBASE;
12808 return OP_X86_AND_REG_MEMBASE;
12810 return OP_X86_OR_REG_MEMBASE;
12812 return OP_X86_XOR_REG_MEMBASE;
12816 #ifdef TARGET_AMD64
/* Under ILP32, OP_LOAD_MEMBASE is 4 bytes and belongs with the 32-bit
 * family; otherwise it is 8 bytes and belongs with the 64-bit family. */
12817 #ifdef __mono_ilp32__
12818 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12820 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12824 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12826 return OP_X86_ADD_REG_MEMBASE;
12828 return OP_X86_SUB_REG_MEMBASE;
12830 return OP_X86_AND_REG_MEMBASE;
12832 return OP_X86_OR_REG_MEMBASE;
12834 return OP_X86_XOR_REG_MEMBASE;
12836 #ifdef __mono_ilp32__
12837 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12839 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12844 return OP_AMD64_COMPARE_REG_MEMBASE;
12846 return OP_AMD64_ADD_REG_MEMBASE;
12848 return OP_AMD64_SUB_REG_MEMBASE;
12850 return OP_AMD64_AND_REG_MEMBASE;
12852 return OP_AMD64_OR_REG_MEMBASE;
12854 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that this
 * architecture only supports through emulation (long shifts on 32-bit
 * registers, emulated mul/div) — the #if ladder filters those out before
 * delegating.  The filtered cases are elided in this chunk.
 */
12863 mono_op_to_op_imm_noemul (int opcode)
12866 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12872 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12879 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12884 return mono_op_to_op_imm (opcode);
12889 * mono_handle_global_vregs:
12891 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them.  Also demotes variables used in only one bblock back to local
 * vregs, then compacts cfg->varinfo/cfg->vars to drop the dead entries.
12895 mono_handle_global_vregs (MonoCompile *cfg)
12897 gint32 *vreg_to_bb;
12898 MonoBasicBlock *bb;
/* vreg -> (block_num + 1) of the single bb using it, or -1 once a second
 * bb is seen.  NOTE(review): sizeof (gint32*) allocates pointer-sized
 * slots for a gint32 array — over-allocates on 64-bit, presumably meant
 * sizeof (gint32); harmless but confirm. */
12901 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12903 #ifdef MONO_ARCH_SIMD_INTRINSICS
12904 if (cfg->uses_simd_intrinsics)
12905 mono_simd_simplify_indirection (cfg);
12908 /* Find local vregs used in more than one bb */
12909 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12910 MonoInst *ins = bb->code;
12911 int block_num = bb->block_num;
12913 if (cfg->verbose_level > 2)
12914 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12917 for (; ins; ins = ins->next) {
12918 const char *spec = INS_INFO (ins->opcode);
12919 int regtype = 0, regindex;
12922 if (G_UNLIKELY (cfg->verbose_level > 2))
12923 mono_print_ins (ins);
/* Only lowered (non-CIL) opcodes may appear at this point. */
12925 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn. */
12927 for (regindex = 0; regindex < 4; regindex ++) {
12930 if (regindex == 0) {
12931 regtype = spec [MONO_INST_DEST];
12932 if (regtype == ' ')
12935 } else if (regindex == 1) {
12936 regtype = spec [MONO_INST_SRC1];
12937 if (regtype == ' ')
12940 } else if (regindex == 2) {
12941 regtype = spec [MONO_INST_SRC2];
12942 if (regtype == ' ')
12945 } else if (regindex == 3) {
12946 regtype = spec [MONO_INST_SRC3];
12947 if (regtype == ' ')
12952 #if SIZEOF_REGISTER == 4
12953 /* In the LLVM case, the long opcodes are not decomposed */
12954 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12956 * Since some instructions reference the original long vreg,
12957 * and some reference the two component vregs, it is quite hard
12958 * to determine when it needs to be global. So be conservative.
12960 if (!get_vreg_to_inst (cfg, vreg)) {
12961 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12963 if (cfg->verbose_level > 2)
12964 printf ("LONG VREG R%d made global.\n", vreg);
12968 * Make the component vregs volatile since the optimizations can
12969 * get confused otherwise.
12971 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12972 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12976 g_assert (vreg != -1);
12978 prev_bb = vreg_to_bb [vreg];
12979 if (prev_bb == 0) {
12980 /* 0 is a valid block num */
12981 vreg_to_bb [vreg] = block_num + 1;
12982 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never made into variables. */
12983 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12986 if (!get_vreg_to_inst (cfg, vreg)) {
12987 if (G_UNLIKELY (cfg->verbose_level > 2))
12988 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable's managed type from regtype / GC ref-ness. */
12992 if (vreg_is_ref (cfg, vreg))
12993 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12995 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12998 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13001 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13004 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13007 g_assert_not_reached ();
13011 /* Flag as having been used in more than one bb */
13012 vreg_to_bb [vreg] = -1;
13018 /* If a variable is used in only one bblock, convert it into a local vreg */
13019 for (i = 0; i < cfg->num_varinfo; i++) {
13020 MonoInst *var = cfg->varinfo [i];
13021 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13023 switch (var->type) {
13029 #if SIZEOF_REGISTER == 8
13032 #if !defined(TARGET_X86)
13033 /* Enabling this screws up the fp stack on x86 */
13036 if (mono_arch_is_soft_float ())
13039 /* Arguments are implicitly global */
13040 /* Putting R4 vars into registers doesn't work currently */
13041 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13042 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
 * Make sure that the variable's liveness interval doesn't contain a call, since
13045 * that would cause the lvreg to be spilled, making the whole optimization
13048 /* This is too slow for JIT compilation */
13050 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13052 int def_index, call_index, ins_index;
13053 gboolean spilled = FALSE;
13058 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13059 const char *spec = INS_INFO (ins->opcode);
13061 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13062 def_index = ins_index;
/* NOTE(review): both clauses below test SRC1/sreg1 — the second was
 * presumably meant to test SRC2/sreg2 (copy-paste); as written, uses
 * through sreg2 are never checked against a preceding call. Confirm. */
13064 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13065 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13066 if (call_index > def_index) {
13072 if (MONO_IS_CALL (ins))
13073 call_index = ins_index;
13083 if (G_UNLIKELY (cfg->verbose_level > 2))
13084 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13085 var->flags |= MONO_INST_IS_DEAD;
13086 cfg->vreg_to_inst [var->dreg] = NULL;
13093 * Compress the varinfo and vars tables so the liveness computation is faster and
13094 * takes up less space.
13097 for (i = 0; i < cfg->num_varinfo; ++i) {
13098 MonoInst *var = cfg->varinfo [i];
13099 if (pos < i && cfg->locals_start == i)
13100 cfg->locals_start = pos;
13101 if (!(var->flags & MONO_INST_IS_DEAD)) {
13103 cfg->varinfo [pos] = cfg->varinfo [i];
13104 cfg->varinfo [pos]->inst_c0 = pos;
13105 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13106 cfg->vars [pos].idx = pos;
13107 #if SIZEOF_REGISTER == 4
13108 if (cfg->varinfo [pos]->type == STACK_I8) {
13109 /* Modify the two component vars too */
13112 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13113 var1->inst_c0 = pos;
13114 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13115 var1->inst_c0 = pos;
13122 cfg->num_varinfo = pos;
13123 if (cfg->locals_start > cfg->num_varinfo)
13124 cfg->locals_start = cfg->num_varinfo;
13128 * mono_spill_global_vars:
13130 * Generate spill code for variables which are not allocated to registers,
13131 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13132 * code is generated which could be optimized by the local optimization passes.
 * Also decomposes OP_LDADDR, fuses loads/stores into memory-operand
 * instruction forms where the target supports them, and records
 * instruction-precise live ranges for debug info.
13135 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13137 MonoBasicBlock *bb;
13139 int orig_next_vreg;
13140 guint32 *vreg_to_lvreg;
13142 guint32 i, lvregs_len;
13143 gboolean dest_has_lvreg = FALSE;
13144 guint32 stacktypes [128];
13145 MonoInst **live_range_start, **live_range_end;
13146 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13147 int *gsharedvt_vreg_to_idx = NULL;
13149 *need_local_opts = FALSE;
13151 memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO regtype chars ('i'/'l'/'f'/'x') to stack types. */
13153 /* FIXME: Move this function to mini.c */
13154 stacktypes ['i'] = STACK_PTR;
13155 stacktypes ['l'] = STACK_I8;
13156 stacktypes ['f'] = STACK_R8;
13157 #ifdef MONO_ARCH_SIMD_INTRINSICS
13158 stacktypes ['x'] = STACK_VTYPE;
13161 #if SIZEOF_REGISTER == 4
13162 /* Create MonoInsts for longs */
13163 for (i = 0; i < cfg->num_varinfo; i++) {
13164 MonoInst *ins = cfg->varinfo [i];
13166 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13167 switch (ins->type) {
13172 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13175 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit component vregs (dreg+1 = LS, dreg+2 = MS)
 * stack slots inside the 64-bit variable's slot. */
13177 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13179 tree->opcode = OP_REGOFFSET;
13180 tree->inst_basereg = ins->inst_basereg;
13181 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13183 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13185 tree->opcode = OP_REGOFFSET;
13186 tree->inst_basereg = ins->inst_basereg;
13187 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13197 if (cfg->compute_gc_maps) {
13198 /* registers need liveness info even for !non refs */
13199 for (i = 0; i < cfg->num_varinfo; i++) {
13200 MonoInst *ins = cfg->varinfo [i];
13202 if (ins->opcode == OP_REGVAR)
13203 ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, variable-sized locals get a runtime-resolved
 * slot index; gsharedvt_vreg_to_idx stores idx+1 (so 0 = "not one"),
 * and -1 marks a by-ref gsharedvt argument. */
13207 if (cfg->gsharedvt) {
13208 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13210 for (i = 0; i < cfg->num_varinfo; ++i) {
13211 MonoInst *ins = cfg->varinfo [i];
13214 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13215 if (i >= cfg->locals_start) {
13217 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13218 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13219 ins->opcode = OP_GSHAREDVT_LOCAL;
13220 ins->inst_imm = idx;
13223 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13224 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13230 /* FIXME: widening and truncation */
13233 * As an optimization, when a variable allocated to the stack is first loaded into
13234 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13235 * the variable again.
13237 orig_next_vreg = cfg->next_vreg;
13238 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* NOTE(review): lvregs capacity is a hard-coded 1024, guarded only by
 * g_assert (lvregs_len < 1024) below — consider a named constant. */
13239 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13243 * These arrays contain the first and last instructions accessing a given
13245 * Since we emit bblocks in the same order we process them here, and we
13246 * don't split live ranges, these will precisely describe the live range of
13247 * the variable, i.e. the instruction range where a valid value can be found
13248 * in the variables location.
13249 * The live range is computed using the liveness info computed by the liveness pass.
13250 * We can't use vmv->range, since that is an abstract live range, and we need
13251 * one which is instruction precise.
13252 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13254 /* FIXME: Only do this if debugging info is requested */
13255 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13256 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13257 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13258 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13260 /* Add spill loads/stores */
13261 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13264 if (cfg->verbose_level > 2)
13265 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is per-bblock: reset at each block entry. */
13267 /* Clear vreg_to_lvreg array */
13268 for (i = 0; i < lvregs_len; i++)
13269 vreg_to_lvreg [lvregs [i]] = 0;
13273 MONO_BB_FOR_EACH_INS (bb, ins) {
13274 const char *spec = INS_INFO (ins->opcode);
13275 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13276 gboolean store, no_lvreg;
13277 int sregs [MONO_MAX_SRC_REGS];
13279 if (G_UNLIKELY (cfg->verbose_level > 2))
13280 mono_print_ins (ins);
13282 if (ins->opcode == OP_NOP)
13286 * We handle LDADDR here as well, since it can only be decomposed
13287 * when variable addresses are known.
13289 if (ins->opcode == OP_LDADDR) {
13290 MonoInst *var = ins->inst_p0;
13292 if (var->opcode == OP_VTARG_ADDR) {
13293 /* Happens on SPARC/S390 where vtypes are passed by reference */
13294 MonoInst *vtaddr = var->inst_left;
13295 if (vtaddr->opcode == OP_REGVAR) {
13296 ins->opcode = OP_MOVE;
13297 ins->sreg1 = vtaddr->dreg;
13299 else if (var->inst_left->opcode == OP_REGOFFSET) {
13300 ins->opcode = OP_LOAD_MEMBASE;
13301 ins->inst_basereg = vtaddr->inst_basereg;
13302 ins->inst_offset = vtaddr->inst_offset;
13305 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13306 /* gsharedvt arg passed by ref */
13307 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13309 ins->opcode = OP_LOAD_MEMBASE;
13310 ins->inst_basereg = var->inst_basereg;
13311 ins->inst_offset = var->inst_offset;
13312 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13313 MonoInst *load, *load2, *load3;
13314 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13315 int reg1, reg2, reg3;
13316 MonoInst *info_var = cfg->gsharedvt_info_var;
13317 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13321 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13324 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13326 g_assert (info_var);
13327 g_assert (locals_var);
13329 /* Mark the instruction used to compute the locals var as used */
13330 cfg->gsharedvt_locals_var_ins = NULL;
13332 /* Load the offset */
13333 if (info_var->opcode == OP_REGOFFSET) {
13334 reg1 = alloc_ireg (cfg);
13335 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13336 } else if (info_var->opcode == OP_REGVAR) {
13338 reg1 = info_var->dreg;
13340 g_assert_not_reached ();
13342 reg2 = alloc_ireg (cfg);
13343 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13344 /* Load the locals area address */
13345 reg3 = alloc_ireg (cfg);
13346 if (locals_var->opcode == OP_REGOFFSET) {
13347 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13348 } else if (locals_var->opcode == OP_REGVAR) {
13349 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13351 g_assert_not_reached ();
13353 /* Compute the address */
13354 ins->opcode = OP_PADD;
13358 mono_bblock_insert_before_ins (bb, ins, load3);
13359 mono_bblock_insert_before_ins (bb, load3, load2);
13361 mono_bblock_insert_before_ins (bb, load2, load);
13363 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: LDADDR becomes basereg + offset. */
13365 ins->opcode = OP_ADD_IMM;
13366 ins->sreg1 = var->inst_basereg;
13367 ins->inst_imm = var->inst_offset;
13370 *need_local_opts = TRUE;
13371 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must all be lowered by now. */
13374 if (ins->opcode < MONO_CEE_LAST) {
13375 mono_print_ins (ins);
13376 g_assert_not_reached ();
13380 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... input, so temporarily swap dreg/sreg2 and use a patched spec2
 * so the generic dreg/sreg handling below treats it as a use. */
13384 if (MONO_IS_STORE_MEMBASE (ins)) {
13385 tmp_reg = ins->dreg;
13386 ins->dreg = ins->sreg2;
13387 ins->sreg2 = tmp_reg;
13390 spec2 [MONO_INST_DEST] = ' ';
13391 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13392 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13393 spec2 [MONO_INST_SRC3] = ' ';
13395 } else if (MONO_IS_STORE_MEMINDEX (ins))
13396 g_assert_not_reached ();
13401 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13402 printf ("\t %.3s %d", spec, ins->dreg);
13403 num_sregs = mono_inst_get_src_registers (ins, sregs);
13404 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13405 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13412 regtype = spec [MONO_INST_DEST];
13413 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13416 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13417 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13418 MonoInst *store_ins;
13420 MonoInst *def_ins = ins;
13421 int dreg = ins->dreg; /* The original vreg */
13423 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13425 if (var->opcode == OP_REGVAR) {
13426 ins->dreg = var->dreg;
13427 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13429 * Instead of emitting a load+store, use a _membase opcode.
13431 g_assert (var->opcode == OP_REGOFFSET);
13432 if (ins->opcode == OP_MOVE) {
13436 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13437 ins->inst_basereg = var->inst_basereg;
13438 ins->inst_offset = var->inst_offset;
13441 spec = INS_INFO (ins->opcode);
13445 g_assert (var->opcode == OP_REGOFFSET);
13447 prev_dreg = ins->dreg;
13449 /* Invalidate any previous lvreg for this vreg */
13450 vreg_to_lvreg [ins->dreg] = 0;
/* Soft-float: an R8 value lives in an integer register pair. */
13454 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13456 store_opcode = OP_STOREI8_MEMBASE_REG;
13459 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13461 #if SIZEOF_REGISTER != 8
13462 if (regtype == 'l') {
13463 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13464 mono_bblock_insert_after_ins (bb, ins, store_ins);
13465 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13466 mono_bblock_insert_after_ins (bb, ins, store_ins);
13467 def_ins = store_ins;
13472 g_assert (store_opcode != OP_STOREV_MEMBASE);
13474 /* Try to fuse the store into the instruction itself */
13475 /* FIXME: Add more instructions */
13476 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13477 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13478 ins->inst_imm = ins->inst_c0;
13479 ins->inst_destbasereg = var->inst_basereg;
13480 ins->inst_offset = var->inst_offset;
13481 spec = INS_INFO (ins->opcode);
13482 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13483 ins->opcode = store_opcode;
13484 ins->inst_destbasereg = var->inst_basereg;
13485 ins->inst_offset = var->inst_offset;
/* The instruction is now itself a store: swap regs and patch spec2
 * as above so the sreg pass below handles it correctly. */
13489 tmp_reg = ins->dreg;
13490 ins->dreg = ins->sreg2;
13491 ins->sreg2 = tmp_reg;
13494 spec2 [MONO_INST_DEST] = ' ';
13495 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13496 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13497 spec2 [MONO_INST_SRC3] = ' ';
13499 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13500 // FIXME: The backends expect the base reg to be in inst_basereg
13501 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13503 ins->inst_basereg = var->inst_basereg;
13504 ins->inst_offset = var->inst_offset;
13505 spec = INS_INFO (ins->opcode);
13507 /* printf ("INS: "); mono_print_ins (ins); */
13508 /* Create a store instruction */
13509 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13511 /* Insert it after the instruction */
13512 mono_bblock_insert_after_ins (bb, ins, store_ins);
13514 def_ins = store_ins;
13517 * We can't assign ins->dreg to var->dreg here, since the
13518 * sregs could use it. So set a flag, and do it after
13521 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13522 dest_has_lvreg = TRUE;
13527 if (def_ins && !live_range_start [dreg]) {
13528 live_range_start [dreg] = def_ins;
13529 live_range_start_bb [dreg] = bb;
13532 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13535 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13536 tmp->inst_c1 = dreg;
13537 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13544 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): literal 3 — presumably MONO_MAX_SRC_REGS; confirm. */
13545 for (srcindex = 0; srcindex < 3; ++srcindex) {
13546 regtype = spec [MONO_INST_SRC1 + srcindex];
13547 sreg = sregs [srcindex];
13549 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13550 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13551 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13552 MonoInst *use_ins = ins;
13553 MonoInst *load_ins;
13554 guint32 load_opcode;
13556 if (var->opcode == OP_REGVAR) {
13557 sregs [srcindex] = var->dreg;
13558 //mono_inst_set_src_registers (ins, sregs);
13559 live_range_end [sreg] = use_ins;
13560 live_range_end_bb [sreg] = bb;
13562 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13565 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13566 /* var->dreg is a hreg */
13567 tmp->inst_c1 = sreg;
13568 mono_bblock_insert_after_ins (bb, ins, tmp);
13574 g_assert (var->opcode == OP_REGOFFSET);
13576 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13578 g_assert (load_opcode != OP_LOADV_MEMBASE);
13580 if (vreg_to_lvreg [sreg]) {
13581 g_assert (vreg_to_lvreg [sreg] != -1);
13583 /* The variable is already loaded to an lvreg */
13584 if (G_UNLIKELY (cfg->verbose_level > 2))
13585 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13586 sregs [srcindex] = vreg_to_lvreg [sreg];
13587 //mono_inst_set_src_registers (ins, sregs);
13591 /* Try to fuse the load into the instruction */
13592 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13593 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13594 sregs [0] = var->inst_basereg;
13595 //mono_inst_set_src_registers (ins, sregs);
13596 ins->inst_offset = var->inst_offset;
13597 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13598 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13599 sregs [1] = var->inst_basereg;
13600 //mono_inst_set_src_registers (ins, sregs);
13601 ins->inst_offset = var->inst_offset;
13603 if (MONO_IS_REAL_MOVE (ins)) {
13604 ins->opcode = OP_NOP;
13607 //printf ("%d ", srcindex); mono_print_ins (ins);
13609 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13611 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13612 if (var->dreg == prev_dreg) {
13614 * sreg refers to the value loaded by the load
13615 * emitted below, but we need to use ins->dreg
13616 * since it refers to the store emitted earlier.
13620 g_assert (sreg != -1);
13621 vreg_to_lvreg [var->dreg] = sreg;
13622 g_assert (lvregs_len < 1024);
13623 lvregs [lvregs_len ++] = var->dreg;
13627 sregs [srcindex] = sreg;
13628 //mono_inst_set_src_registers (ins, sregs);
13630 #if SIZEOF_REGISTER != 8
13631 if (regtype == 'l') {
13632 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13633 mono_bblock_insert_before_ins (bb, ins, load_ins);
13634 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13635 mono_bblock_insert_before_ins (bb, ins, load_ins);
13636 use_ins = load_ins;
13641 #if SIZEOF_REGISTER == 4
13642 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13644 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13645 mono_bblock_insert_before_ins (bb, ins, load_ins);
13646 use_ins = load_ins;
13650 if (var->dreg < orig_next_vreg) {
13651 live_range_end [var->dreg] = use_ins;
13652 live_range_end_bb [var->dreg] = bb;
13655 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13658 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13659 tmp->inst_c1 = var->dreg;
13660 mono_bblock_insert_after_ins (bb, ins, tmp);
13664 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass: record the lvreg now that the
 * sregs no longer need the original dreg value. */
13666 if (dest_has_lvreg) {
13667 g_assert (ins->dreg != -1);
13668 vreg_to_lvreg [prev_dreg] = ins->dreg;
13669 g_assert (lvregs_len < 1024);
13670 lvregs [lvregs_len ++] = prev_dreg;
13671 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
13675 tmp_reg = ins->dreg;
13676 ins->dreg = ins->sreg2;
13677 ins->sreg2 = tmp_reg;
/* Calls clobber registers, so cached lvregs become invalid. */
13680 if (MONO_IS_CALL (ins)) {
13681 /* Clear vreg_to_lvreg array */
13682 for (i = 0; i < lvregs_len; i++)
13683 vreg_to_lvreg [lvregs [i]] = 0;
13685 } else if (ins->opcode == OP_NOP) {
13687 MONO_INST_NULLIFY_SREGS (ins);
13690 if (cfg->verbose_level > 2)
13691 mono_print_ins_index (1, ins);
13694 /* Extend the live range based on the liveness info */
13695 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13696 for (i = 0; i < cfg->num_varinfo; i ++) {
13697 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13699 if (vreg_is_volatile (cfg, vi->vreg))
13700 /* The liveness info is incomplete */
13703 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13704 /* Live from at least the first ins of this bb */
13705 live_range_start [vi->vreg] = bb->code;
13706 live_range_start_bb [vi->vreg] = bb;
13709 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13710 /* Live at least until the last ins of this bb */
13711 live_range_end [vi->vreg] = bb->last_ins;
13712 live_range_end_bb [vi->vreg] = bb;
13718 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13720 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13721 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13723 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13724 for (i = 0; i < cfg->num_varinfo; ++i) {
13725 int vreg = MONO_VARINFO (cfg, i)->vreg;
13728 if (live_range_start [vreg]) {
13729 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13731 ins->inst_c1 = vreg;
13732 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13734 if (live_range_end [vreg]) {
13735 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13737 ins->inst_c1 = vreg;
13738 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13739 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13741 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13747 if (cfg->gsharedvt_locals_var_ins) {
13748 /* Nullify if unused */
13749 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13750 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13753 g_free (live_range_start);
13754 g_free (live_range_end);
13755 g_free (live_range_start_bb);
13756 g_free (live_range_end_bb);
13761 * - use 'iadd' instead of 'int_add'
13762 * - handling ovf opcodes: decompose in method_to_ir.
13763 * - unify iregs/fregs
13764 * -> partly done, the missing parts are:
13765 * - a more complete unification would involve unifying the hregs as well, so
13766 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13767 * would no longer map to the machine hregs, so the code generators would need to
13768 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13769 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13770 * fp/non-fp branches speeds it up by about 15%.
13771 * - use sext/zext opcodes instead of shifts
13773 * - get rid of TEMPLOADs if possible and use vregs instead
13774 * - clean up usage of OP_P/OP_ opcodes
13775 * - cleanup usage of DUMMY_USE
13776 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13778 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13779 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13780 * - make sure handle_stack_args () is called before the branch is emitted
13781 * - when the new IR is done, get rid of all unused stuff
13782 * - COMPARE/BEQ as separate instructions or unify them ?
13783 * - keeping them separate allows specialized compare instructions like
13784 * compare_imm, compare_membase
13785 * - most back ends unify fp compare+branch, fp compare+ceq
13786 * - integrate mono_save_args into inline_method
13787 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13788 * - handle long shift opts on 32 bit platforms somehow: they require
13789 * 3 sregs (2 for arg1 and 1 for arg2)
13790 * - make byref a 'normal' type.
13791 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13792 * variable if needed.
13793 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13794 * like inline_method.
13795 * - remove inlining restrictions
13796 * - fix LNEG and enable cfold of INEG
13797 * - generalize x86 optimizations like ldelema as a peephole optimization
13798 * - add store_mem_imm for amd64
13799 * - optimize the loading of the interruption flag in the managed->native wrappers
13800 * - avoid special handling of OP_NOP in passes
13801 * - move code inserting instructions into one function/macro.
13802 * - try a coalescing phase after liveness analysis
13803 * - add float -> vreg conversion + local optimizations on !x86
13804 * - figure out how to handle decomposed branches during optimizations, ie.
13805 * compare+branch, op_jump_table+op_br etc.
13806 * - promote RuntimeXHandles to vregs
13807 * - vtype cleanups:
13808 * - add a NEW_VARLOADA_VREG macro
13809 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13810 * accessing vtype fields.
13811 * - get rid of I8CONST on 64 bit platforms
13812 * - dealing with the increase in code size due to branches created during opcode
13814 * - use extended basic blocks
13815 * - all parts of the JIT
13816 * - handle_global_vregs () && local regalloc
13817 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13818 * - sources of increase in code size:
13821 * - isinst and castclass
13822 * - lvregs not allocated to global registers even if used multiple times
13823 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13825 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13826 * - add all micro optimizations from the old JIT
13827 * - put tree optimizations into the deadce pass
13828 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13829 * specific function.
13830 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13831 * fcompare + branchCC.
13832 * - create a helper function for allocating a stack slot, taking into account
13833 * MONO_CFG_HAS_SPILLUP.
13835 * - merge the ia64 switch changes.
13836 * - optimize mono_regstate2_alloc_int/float.
13837 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13838 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13839 * parts of the tree could be separated by other instructions, killing the tree
13840 * arguments, or stores killing loads etc. Also, should we fold loads into other
13841 * instructions if the result of the load is used multiple times ?
13842 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13843 * - LAST MERGE: 108395.
13844 * - when returning vtypes in registers, generate IR and append it to the end of the
13845 * last bb instead of doing it in the epilog.
13846 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13854 - When to decompose opcodes:
13855 - earlier: this makes some optimizations hard to implement, since the low level IR
 13856	     no longer contains the necessary information. But it is easier to do.
13857 - later: harder to implement, enables more optimizations.
13858 - Branches inside bblocks:
13859 - created when decomposing complex opcodes.
13860 - branches to another bblock: harmless, but not tracked by the branch
13861 optimizations, so need to branch to a label at the start of the bblock.
13862 - branches to inside the same bblock: very problematic, trips up the local
 13863	     reg allocator. Can be fixed by splitting the current bblock, but that is a
13864 complex operation, since some local vregs can become global vregs etc.
13865 - Local/global vregs:
13866 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13867 local register allocator.
13868 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13869 structure, created by mono_create_var (). Assigned to hregs or the stack by
13870 the global register allocator.
13871 - When to do optimizations like alu->alu_imm:
13872 - earlier -> saves work later on since the IR will be smaller/simpler
13873 - later -> can work on more instructions
13874 - Handling of valuetypes:
13875 - When a vtype is pushed on the stack, a new temporary is created, an
13876 instruction computing its address (LDADDR) is emitted and pushed on
13877 the stack. Need to optimize cases when the vtype is used immediately as in
13878 argument passing, stloc etc.
13879 - Instead of the to_end stuff in the old JIT, simply call the function handling
13880 the values on the stack before emitting the last instruction of the bb.
13883 #endif /* DISABLE_JIT */