2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/*
 * NOTE(review): this listing is missing lines — the `do { ... \` macros below
 * lack their `} while (0)` closers.  That is a gap in the extraction, not a
 * defect in the original source; the code is left byte-identical here.
 */
/* Inlining heuristics: relative cost of a branch, and the max IL length of an inline candidate. */
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current callee: record the failure and bail out of IR generation. */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
/* Bail out if a previous step already flagged a compilation exception on cfg. */
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
/* Record a method-access violation (cmethod not accessible from method) and bail out. */
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
/* Record a field-access violation and bail out. */
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
/* Generic sharing cannot handle OPCODE here: log (with call-site file/line) and bail out. */
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
/* Like GENERIC_SHARING_FAILURE, but only fails when compiling in gsharedvt mode. */
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
/* Mark the compilation as failed due to OOM and bail out. */
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
/* Turn off AOT for this method, logging the exact spot when verbose. */
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
/* Signal a type-load error (optionally breaking into the debugger first) and bail out. */
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
/* presumably this expands to LOAD_ERROR after stashing the offending class — lines missing here */
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
/* Propagate a pending MonoError on cfg as a compilation exception. */
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whenever 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
/* Cached trampoline signatures, built once by mono_create_helper_signatures (). */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
/*
 * The MINI_OP/MINI_OP3 macros are redefined before each include of
 * "mini-ops.h" so the same opcode list expands to different per-opcode
 * tables (register-usage characters, then source-register counts).
 * NOTE(review): several lines of this section are missing from the listing.
 */
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
/* Initialize a MonoMethodVar: 0xffff marks "no first use recorded yet". */
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the internal virtual-register allocators.
 * Each allocates a fresh vreg of the given kind (int/long/float/pointer,
 * or one chosen from the eval-stack type for mono_alloc_dreg).
 */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map a MonoType to the move opcode used to copy a value of that type
 * between vregs (OP_MOVE / OP_FMOVE / OP_RMOVE / etc.).  Enums are reduced
 * to their base type; generic instances to their container class.
 * NOTE(review): many case labels and return statements are missing from
 * this listing — the visible lines are left byte-identical.
 */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4 values live in float regs unless the backend keeps them in r4 form */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
/* type variables only reach here under generic sharing */
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print a basic block's in/out edges and every
 * instruction in its code list to stdout.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Build the cached MonoMethodSignature objects used when emitting calls
 * to runtime trampolines.  The string argument encodes "<ret> <args...>".
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook: if the --break-on-unverified debug option is set,
 * presumably traps into the debugger (the action line is missing from
 * this listing — TODO confirm against the full source).
 */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *
 *   Mark the compilation as failed with a MethodAccessException whose
 * message names both the inaccessible method and the caller.  The
 * full-name strings are owned locally and freed before returning.
 */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/*
 * field_access_failure:
 *
 *   Mark the compilation as failed with a FieldAccessException whose
 * message names the inaccessible field and the accessing method.
 */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/*
 * inline_failure:
 *
 *   Record that inlining failed (optionally logging MSG when verbose);
 * the INLINE_FAILURE macro then aborts IR generation for the callee.
 */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record a gsharedvt (generic sharing for valuetypes) failure for
 * OPCODE at the given call site, storing a descriptive message on cfg
 * and marking the compilation as failed so a concrete instantiation is
 * compiled instead.
 */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED: handle IL that fails verification during IR generation.
 * Under gsharedvt this falls back to compiling a concrete instantiation;
 * otherwise it breaks into the debugger hook (remaining lines missing
 * from this listing).
 */
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
/*
 * GET_BBLOCK: look up (or lazily create and register) the basic block
 * starting at IL address IP, bounds-checking IP against the method body.
 */
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/*
 * Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a fresh
 * managed-pointer vreg; only available on x86/amd64 backends.
 */
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * If one operand is r4 and the other r8, widen the r4 side to r8
 * (mixing them is allowed by the ECMA spec); on 64-bit, sign-extend an
 * i4 operand paired with a native-int one.  Updates *arg1_ref/*arg2_ref
 * and the ins sregs to point at the converted values.
 * NOTE(review): several lines are missing from this listing.
 */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
/*
 * ADD_BINOP: pop two values from the eval stack (sp), emit a
 * type-specialized binary op, and push the (possibly decomposed) result.
 * Uses the implicit locals cfg/ins/sp of the IR-generation loop.
 */
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for a single-operand opcode. */
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a compare + conditional branch for a two-way IL
 * branch, wiring up the true/false basic-block edges; flushes any
 * remaining eval-stack entries to interface variables first.
 */
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, bblock, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, bblock, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, bblock, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (bblock, cmp); \
559 MONO_ADD_INS (bblock, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
/*
 * Adds the edge to both adjacency lists (from->out_bb and to->in_bb),
 * skipping duplicates; the lists are grown by copying into a new
 * mempool allocation one element larger.
 */
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* bail out if the edge already exists */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
597 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
/* Token layout: (clause index + 1) << 8 | region kind | clause flags. */
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
/* filter blocks live between filter_offset and handler_offset */
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* offset inside the protected (try) range of the clause */
666 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
667 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the exception clauses of kind TYPE whose
 * protected range contains IP but not TARGET — i.e. the handlers a
 * branch from IP to TARGET would leave and which therefore must run.
 */
674 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
676 MonoMethodHeader *header = cfg->header;
677 MonoExceptionClause *clause;
681 for (i = 0; i < header->num_clauses; ++i) {
682 clause = &header->clauses [i];
683 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
684 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
685 if (clause->flags == type)
686 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer variable for REGION, creating and caching
 * it in cfg->spvars on first use.  Marked MONO_INST_VOLATILE so the
 * register allocator leaves it on the stack.
 */
693 mono_create_spvar_for_region (MonoCompile *cfg, int region)
697 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
701 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
702 /* prevent it from being register allocated */
703 var->flags |= MONO_INST_VOLATILE;
705 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
709 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
711 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Same as above, but create and cache the exception variable on first use. */
715 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
719 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
723 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
724 /* prevent it from being register allocated */
725 var->flags |= MONO_INST_VOLATILE;
727 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
733 * Returns the type used in the eval stack when @type is loaded.
734 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets both inst->type (STACK_*) and inst->klass from the MonoType. */
737 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
741 type = mini_replace_type (type);
742 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
744 inst->type = STACK_MP;
749 switch (type->type) {
751 inst->type = STACK_INV;
755 case MONO_TYPE_BOOLEAN:
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
/* type variables: vtype under gsharedvt, otherwise treated as object */
804 g_assert (cfg->generic_sharing_context);
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 inst->type = STACK_OBJ;
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of arithmetic ops, indexed [lhs type][rhs type]; STACK_INV marks invalid IL. */
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand stack type. */
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
/* Result type of bitwise/integer-only binary ops. */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of operand-type pairs (0 = invalid; nonzero codes distinguish allowed forms). */
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
/* Result type of shift ops: value type indexed by [value][shift amount]. */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
/* Each entry is the (specific - generic) opcode delta, added to ins->opcode in type_from_op (). */
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Also specializes ins->opcode: the generic CIL opcode is turned into the
 * matching typed IR opcode by adding the delta from the *_op_map tables
 * above or by direct assignment.
 * NOTE(review): this listing is missing many case labels and break
 * statements of the switch; the visible lines are left byte-identical.
 */
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
/* arithmetic binops: result type from bin_num_table, opcode from binops_op_map */
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
/* integer-only binops */
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
/* shifts */
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* compares: pick the compare width from the first operand's stack type */
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches */
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
/* unsigned set-on-compare: only comparability code 1 is valid */
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
/* unary ops */
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
/* narrowing conversions to i1/i2/i4 */
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
/* overflow-checked conversions */
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: conversion to native unsigned int */
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
/* conversions to 64-bit */
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
/* conversions to r4/r8 */
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
/* conversions to native int */
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic; not defined for floats */
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
/* membase loads: the stack type follows the loaded width */
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers carry object as a conservative klass */
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
/* Map from MonoTypeEnum-indexed entries to eval-stack types (table name missing from listing). */
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Compatibility of stack types against signature parameter types. */
1162 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the argument instructions in ARGS are compatible with
 * SIG's parameter types (byref-ness, reference vs. value, float kinds).
 * NOTE(review): most of the body — including the return statements — is
 * missing from this listing; visible lines left byte-identical.
 */
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created; only meaningful on arches that define MONO_ARCH_NEED_GOT_VAR, and only when compiling AOT. */
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; requires generic sharing. */
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Inverse of type_to_eval_stack_type: return a MonoType describing the
 * value INS holds on the eval stack (using ins->klass for MP/OBJ/VTYPE).
 */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: use the pointee class's this_arg (byref) type */
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/*
 * type_to_stack_type:
 *
 *   Map MonoType T to the corresponding STACK_* evaluation-stack type.
 * Enums are unwrapped first via mono_type_get_underlying_type ().
 * NOTE(review): several case labels and return statements are elided in
 * this excerpt; consult the full switch for the complete mapping.
 */
1289 	t = mono_type_get_underlying_type (t);
/* Small integer and char types all widen to the 32-bit integer stack type. */
1293 case MONO_TYPE_BOOLEAN:
1296 case MONO_TYPE_CHAR:
1303 case MONO_TYPE_FNPTR:
/* Reference types collapse to a single object stack type. */
1305 case MONO_TYPE_CLASS:
1306 case MONO_TYPE_STRING:
1307 case MONO_TYPE_OBJECT:
1308 case MONO_TYPE_SZARRAY:
1309 case MONO_TYPE_ARRAY:
/* R4 is per-config: depends on whether the backend keeps float32 distinct from R8. */
1315 return cfg->r4_stack_type;
1318 case MONO_TYPE_VALUETYPE:
1319 case MONO_TYPE_TYPEDBYREF:
1321 case MONO_TYPE_GENERICINST:
/* Generic instances are only vtypes when the instantiation is a valuetype. */
1322 if (mono_type_generic_inst_is_valuetype (t))
1328 g_assert_not_reached ();
1335 array_access_to_klass (int opcode)
/*
 * array_access_to_klass:
 *
 *   Return the element MonoClass accessed by a CEE_LDELEM_* / CEE_STELEM_*
 * opcode. Unsigned and signed variants map to distinct default classes.
 * NOTE(review): the case labels preceding most returns are elided in this
 * excerpt; each return corresponds to one LDELEM/STELEM opcode.
 */
1339 return mono_defaults.byte_class;
1341 return mono_defaults.uint16_class;
1344 return mono_defaults.int_class;
1347 return mono_defaults.sbyte_class;
1350 return mono_defaults.int16_class;
1353 return mono_defaults.int32_class;
1355 return mono_defaults.uint32_class;
1358 return mono_defaults.int64_class;
1361 return mono_defaults.single_class;
1364 return mono_defaults.double_class;
/* Reference-typed element accesses share a single object element class. */
1365 case CEE_LDELEM_REF:
1366 case CEE_STELEM_REF:
1367 return mono_defaults.object_class;
1369 g_assert_not_reached ();
1375 * We try to share variables when possible
1378 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable used to carry the stack value INS across a basic
 * block boundary at stack position SLOT. Variables are cached per
 * (slot, stack type) in cfg->intvars so the same slot/type pair reuses one
 * local at every join point.
 */
1383 /* inlining can result in deeper stacks */
1384 if (slot >= cfg->header->max_stack)
1385 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the flat intvars cache: one entry per stack type per slot. */
1387 pos = ins->type - 1 + slot * STACK_MAX;
1389 switch (ins->type) {
/* Cache hit: inst_c0 of the cached var is its index into cfg->varinfo. */
1396 if ((vnum = cfg->intvars [pos]))
1397 return cfg->varinfo [vnum];
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1399 cfg->intvars [pos] = res->inst_c0;
/* Fallthrough path (elided cases): types not cached get a fresh local each time. */
1402 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1408 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1411 * Don't use this if a generic_context is set, since that means AOT can't
1412 * look up the method using just the image+token.
1413 * table == 0 means this is a reference made from a wrapper.
1415 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1416 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1417 jump_info_token->image = image;
1418 jump_info_token->token = token;
1419 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1424 * This function is called to handle items that are left on the evaluation stack
1425 * at basic block boundaries. What happens is that we save the values to local variables
1426 * and we reload them later when first entering the target basic block (with the
1427 * handle_loaded_temps () function).
1428 * A single join point will use the same variables (stored in the array bb->out_stack or
1429 * bb->in_stack, if the basic block is before or after the join point).
1431 * This function needs to be called _before_ emitting the last instruction of
1432 * the bb (i.e. before emitting a branch).
1433 * If the stack merge fails at a join point, cfg->unverifiable is set.
1436 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
/* Contract is documented in the comment block above: spill COUNT stack items
 * (SP) into shared locals at a basic-block boundary, before the bb's final
 * branch is emitted. Sets cfg->unverifiable on a failed stack merge. */
1439 MonoBasicBlock *bb = cfg->cbb;
1440 MonoBasicBlock *outb;
1441 MonoInst *inst, **locals;
1446 if (cfg->verbose_level > 3)
1447 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick or allocate the shared out_stack variable array for this bb. */
1448 if (!bb->out_scount) {
1449 bb->out_scount = count;
1450 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack so both sides agree. */
1452 for (i = 0; i < bb->out_count; ++i) {
1453 outb = bb->out_bb [i];
1454 /* exception handlers are linked, but they should not be considered for stack args */
1455 if (outb->flags & BB_EXCEPTION_HANDLER)
1457 //printf (" %d", outb->block_num);
1458 if (outb->in_stack) {
1460 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables for each slot. */
1466 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1467 for (i = 0; i < count; ++i) {
1469 * try to reuse temps already allocated for this purpose, if they occupy the same
1470 * stack slot and if they are of the same type.
1471 * This won't cause conflicts since if 'local' is used to
1472 * store one of the values in the in_stack of a bblock, then
1473 * the same variable will be used for the same outgoing stack
1475 * This doesn't work when inlining methods, since the bblocks
1476 * in the inlined methods do not inherit their in_stack from
1477 * the bblock they are inlined to. See bug #58863 for an
1480 if (cfg->inlined_method)
1481 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1483 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate our out_stack as the in_stack of every successor bb. */
1488 for (i = 0; i < bb->out_count; ++i) {
1489 outb = bb->out_bb [i];
1490 /* exception handlers are linked, but they should not be considered for stack args */
1491 if (outb->flags & BB_EXCEPTION_HANDLER)
1493 if (outb->in_scount) {
/* A stack-depth mismatch at a join point means the IL is unverifiable. */
1494 if (outb->in_scount != bb->out_scount) {
1495 cfg->unverifiable = TRUE;
1498 continue; /* check they are the same locals */
1500 outb->in_scount = count;
1501 outb->in_stack = bb->out_stack;
/* Phase 3: emit the stores, replacing sp[i] with the spilled locals. */
1504 locals = bb->out_stack;
1506 for (i = 0; i < count; ++i) {
1507 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1508 inst->cil_code = sp [i]->cil_code;
1509 sp [i] = locals [i];
1510 if (cfg->verbose_level > 3)
1511 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1515 * It is possible that the out bblocks already have in_stack assigned, and
1516 * the in_stacks differ. In this case, we will store to all the different
1523 /* Find a bblock which has a different in_stack */
1525 while (bindex < bb->out_count) {
1526 outb = bb->out_bb [bindex];
1527 /* exception handlers are linked, but they should not be considered for stack args */
1528 if (outb->flags & BB_EXCEPTION_HANDLER) {
1532 if (outb->in_stack != locals) {
/* Duplicate the stores into each distinct in_stack variable set. */
1533 for (i = 0; i < count; ++i) {
1534 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1535 inst->cil_code = sp [i]->cil_code;
1536 sp [i] = locals [i];
1537 if (cfg->verbose_level > 3)
1538 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1540 locals = outb->in_stack;
1549 /* Emit code which loads interface_offsets [klass->interface_id]
1550 * The array is stored in memory before vtable.
1553 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* Load interface_offsets [klass->interface_id] into intf_reg; the offsets
 * array is laid out in memory immediately before the vtable (negative offsets). */
1555 	if (cfg->compile_aot) {
1556 int ioffset_reg = alloc_preg (cfg);
1557 int iid_reg = alloc_preg (cfg);
/* AOT: interface_id is not known at compile time, so load the pre-adjusted
 * (already negated/scaled) id as a patchable constant and add it to the vtable. */
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a compile-time constant, so a single negative-offset load suffices. */
1564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1569 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR leaving a nonzero value in INTF_BIT_REG iff the interface bitmap
 * at [BASE_REG + OFFSET] has the bit for KLASS's interface id set. Three code
 * shapes: compressed bitmap (icall), uncompressed AOT (runtime id), and
 * uncompressed JIT (constant-folded byte index and mask).
 */
1571 	int ibitmap_reg = alloc_preg (cfg);
1572 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1574 MonoInst *res, *ins;
1575 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1576 MONO_ADD_INS (cfg->cbb, ins);
1578 if (cfg->compile_aot)
1579 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1581 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1582 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1585 int ibitmap_byte_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1589 if (cfg->compile_aot) {
/* AOT: interface_id is only known at load time, so compute byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) with emitted arithmetic. */
1590 int iid_reg = alloc_preg (cfg);
1591 int shifted_iid_reg = alloc_preg (cfg);
1592 int ibitmap_byte_address_reg = alloc_preg (cfg);
1593 int masked_iid_reg = alloc_preg (cfg);
1594 int iid_one_bit_reg = alloc_preg (cfg);
1595 int iid_bit_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1601 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1602 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask fold to immediates at compile time. */
1605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1612 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1613 * stored in "klass_reg" implements the interface "klass".
1616 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Bitmap check against a MonoClass*: uses MonoClass.interface_bitmap. */
1618 	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1622 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1623 * stored in "vtable_reg" implements the interface "klass".
1626 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Bitmap check against a MonoVTable*: uses MonoVTable.interface_bitmap. */
1628 	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1632 * Emit code which checks whether the interface id of @klass is smaller
1633 * than the value given by max_iid_reg.
1636 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1637 				MonoBasicBlock *false_target)
/* Guard: klass->interface_id must not exceed the max interface id in
 * max_iid_reg. On failure, branch to FALSE_TARGET if given, otherwise
 * throw InvalidCastException. */
1639 	if (cfg->compile_aot) {
/* AOT: the id is a runtime constant, so load it via a patchable const. */
1640 int iid_reg = alloc_preg (cfg);
1641 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned less-than: max_iid < iid means klass cannot be implemented. */
1647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1649 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1652 /* Same as above, but obtains max_iid from a vtable */
1654 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1655 						 MonoBasicBlock *false_target)
/* Load max_interface_id from the vtable, then delegate to the generic check. */
1657 	int max_iid_reg = alloc_preg (cfg);
1659 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1660 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1663 /* Same as above, but obtains max_iid from a klass */
1665 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1666 						 MonoBasicBlock *false_target)
/* Load max_interface_id from the MonoClass, then delegate to the generic check. */
1668 	int max_iid_reg = alloc_preg (cfg);
1670 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1671 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1675 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test: check whether the class in KLASS_REG
 * derives from KLASS by probing the supertypes array at KLASS's inheritance
 * depth. Branch to TRUE_TARGET on a match, FALSE_TARGET on a depth miss.
 * KLASS_INS, when non-NULL, supplies the target class at runtime (shared code).
 */
1677 	int idepth_reg = alloc_preg (cfg);
1678 	int stypes_reg = alloc_preg (cfg);
1679 	int stype = alloc_preg (cfg);
1681 	mono_class_setup_supertypes (klass);
/* Deep hierarchies may have fewer cached supertypes: verify idepth first. */
1683 	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [idepth - 1] is the candidate's ancestor at KLASS's depth. */
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against the runtime class (klass_ins), an AOT const, or an immediate. */
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1692 	} else if (cfg->compile_aot) {
1693 int const_reg = alloc_preg (cfg);
1694 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1695 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1699 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1703 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
/* Convenience wrapper: subtype test with a compile-time-known target class. */
1705 	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1709 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
/* Interface test against a vtable: bounds-check the interface id, test the
 * interface bitmap bit, then branch to TRUE_TARGET on success (or throw
 * InvalidCastException when no TRUE_TARGET is given). */
1711 	int intf_reg = alloc_preg (cfg);
1713 	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1714 	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1715 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1723 * Variant of the above that takes a register to the class, not the vtable.
1726 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
/* Same as mini_emit_iface_cast, but KLASS_REG holds a MonoClass* rather than
 * a vtable; used where only the class pointer is available. */
1728 	int intf_bit_reg = alloc_preg (cfg);
1730 	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1731 	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1732 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1736 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1740 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
/* Exact class-equality check: throw InvalidCastException unless KLASS_REG
 * equals KLASS. The target class comes from KLASS_INST (runtime, shared code),
 * an AOT class constant, or a compile-time immediate. */
1743 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1744 	} else if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1755 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
/* Convenience wrapper: exact class check with a compile-time-known class. */
1757 	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1761 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
/* Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing on mismatch. */
1763 	if (cfg->compile_aot) {
1764 int const_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1770 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1774 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1777 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. Arrays are checked by rank plus a
 * recursive element-class check; non-arrays use the supertypes table.
 * OBJ_REG may be -1 to skip the vector (bounds == NULL) check.
 */
1780 		int rank_reg = alloc_preg (cfg);
1781 		int eclass_reg = alloc_preg (cfg);
1783 		g_assert (!klass_inst);
/* Array path: ranks must match exactly, then the element classes are checked. */
1784 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1785 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1786 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1787 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1788 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special cases for object/enum element classes mirror the runtime's array
 * covariance rules for enums and their underlying types. */
1789 		if (klass->cast_class == mono_defaults.object_class) {
1790 int parent_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1792 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
1795 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 		} else if (klass->cast_class == mono_defaults.enum_class) {
1798 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1799 		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1800 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1802 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1803 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1806 		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1807 /* Check that the object is a vector too */
/* SZARRAY means zero-based single-dim: MonoArray.bounds must be NULL. */
1808 int bounds_reg = alloc_preg (cfg);
1809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1811 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table check, same layout as isninst but throwing. */
1814 		int idepth_reg = alloc_preg (cfg);
1815 		int stypes_reg = alloc_preg (cfg);
1816 		int stype = alloc_preg (cfg);
1818 		mono_class_setup_supertypes (klass);
1820 		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1823 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1825 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1826 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1827 		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1832 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
/* Convenience wrapper: castclass with a compile-time-known target class. */
1834 	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1838 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/*
 * mini_emit_memset:
 *
 *   Emit inline IR which sets SIZE bytes at [DESTREG + OFFSET] to VAL,
 * using the widest stores ALIGN permits. Only VAL == 0 is supported.
 */
1842 	g_assert (val == 0);
/* Single-store fast path when the whole region fits one aligned register store. */
1847 	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1856 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1858 #if SIZEOF_REGISTER == 8
1860 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once and emit a store sequence. */
1866 	val_reg = alloc_preg (cfg);
1868 	if (SIZEOF_REGISTER == 8)
1869 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1871 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned region: fall back to byte stores. */
1874 /* This could be optimized further if necessary */
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1883 #if !NO_UNALIGNED_ACCESS
1884 	if (SIZEOF_REGISTER == 8) {
/* Align to 8 with a 4-byte store, then use 8-byte stores for the bulk. */
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Descend through 4-, 2-, and 1-byte stores for the remaining tail. */
1899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1916 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], using the widest load/store pairs ALIGN permits.
 */
1923 	/*FIXME arbitrary hack to avoid unbound code expansion.*/
1924 	g_assert (size < 10000);
/* Unaligned region: fall back to byte-by-byte copies. */
1927 /* This could be optimized further if necessary */
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1938 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: copy in 8-byte chunks while possible. */
1939 	if (SIZEOF_REGISTER == 8) {
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, then 2-, then 1-byte copies. */
1952 cur_reg = alloc_preg (cfg);
1953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1960 cur_reg = alloc_preg (cfg);
1961 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1968 cur_reg = alloc_preg (cfg);
1969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1978 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
/* Emit IR storing the value in SREG1 into the TLS slot TLS_KEY. Under AOT
 * the slot offset is a patchable constant (register variant); otherwise the
 * offset is resolved now and baked into the instruction. */
1982 	if (cfg->compile_aot) {
1983 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1984 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1986 ins->sreg2 = c->dreg;
1987 MONO_ADD_INS (cfg->cbb, ins);
1989 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1991 ins->inst_offset = mini_get_tls_offset (tls_key);
1992 MONO_ADD_INS (cfg->cbb, ins);
1999 * Emit IR to push the current LMF onto the LMF stack.
2002 emit_push_lmf (MonoCompile *cfg)
2005 	 * Emit IR to push the LMF:
2006 	 * lmf_addr = <lmf_addr from tls>
2007 	 * lmf->lmf_addr = lmf_addr
2008 	 * lmf->prev_lmf = *lmf_addr
/* Links this method's MonoLMF (Last Managed Frame) into the per-thread LMF
 * list so the runtime can walk managed frames from native code. */
2011 	int lmf_reg, prev_lmf_reg;
2012 	MonoInst *ins, *lmf_ins;
2017 	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Fast path: the TLS slot holds the MonoLMF* directly; link and store back. */
2018 /* Load current lmf */
2019 lmf_ins = mono_get_lmf_intrinsic (cfg);
2021 MONO_ADD_INS (cfg->cbb, lmf_ins);
2022 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2023 lmf_reg = ins->dreg;
2024 /* Save previous_lmf */
2025 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2027 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2030 	 * Store lmf_addr in a variable, so it can be allocated to a global register.
2032 		if (!cfg->lmf_addr_var)
2033 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several alternative ways to obtain lmf_addr follow, chosen per platform:
 * a jit-tls intrinsic, an lmf_addr intrinsic, an inlined pthread_getspecific,
 * or the generic mono_get_lmf_addr icall. */
2036 		ins = mono_get_jit_tls_intrinsic (cfg);
2038 int jit_tls_dreg = ins->dreg;
2040 MONO_ADD_INS (cfg->cbb, ins);
2041 lmf_reg = alloc_preg (cfg);
2042 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2049 MONO_ADD_INS (cfg->cbb, lmf_ins);
2052 MonoInst *args [16], *jit_tls_ins, *ins;
2054 /* Inline mono_get_lmf_addr () */
2055 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2057 /* Load mono_jit_tls_id */
2058 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2059 /* call pthread_getspecific () */
2060 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2061 /* lmf_addr = &jit_tls->lmf */
2062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2065 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2069 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2071 		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2072 		lmf_reg = ins->dreg;
/* Link: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2074 		prev_lmf_reg = alloc_preg (cfg);
2075 		/* Save previous_lmf */
2076 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2077 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2079 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2086 * Emit IR to pop the current LMF from the LMF stack.
2089 emit_pop_lmf (MonoCompile *cfg)
/* Emit IR unlinking this method's MonoLMF from the per-thread LMF list;
 * the inverse of emit_push_lmf (). */
2091 	int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2097 	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2098 	lmf_reg = ins->dreg;
2100 	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Fast path: restore previous_lmf straight into the TLS slot. */
2101 /* Load previous_lmf */
2102 prev_lmf_reg = alloc_preg (cfg);
2103 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2105 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2108 	 * Emit IR to pop the LMF:
2109 	 * *(lmf->lmf_addr) = lmf->prev_lmf
2111 		/* This could be called before emit_push_lmf () */
2112 		if (!cfg->lmf_addr_var)
2113 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2114 		lmf_addr_reg = cfg->lmf_addr_var->dreg;
2116 		prev_lmf_reg = alloc_preg (cfg);
2117 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2118 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2123 emit_instrumentation_call (MonoCompile *cfg, void *func)
/* Emit a profiler enter/leave callback (FUNC) taking the current method as
 * its only argument, when enter/leave profiling is enabled. */
2125 	MonoInst *iargs [1];
2128 	 * Avoid instrumenting inlined methods since it can
2129 	 * distort profiling results.
2131 	if (cfg->method != cfg->current_method)
2134 	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2135 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2136 mono_emit_jit_icall (cfg, func, iargs);
2141 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/*
 * ret_type_to_call_opcode:
 *
 *   Select the call IR opcode for a callee returning TYPE. Each return kind
 * (void/int/long/float/r4/vtype) has three variants: indirect (CALLI),
 * virtual (MEMBASE) and direct. Byref returns use the plain int variant.
 */
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* Resolve generic type variables and enums before dispatching on ->type. */
2147 	type = mini_get_basic_type_from_generic (gsctx, type);
2148 	type = mini_replace_type (type);
2149 	switch (type->type) {
2150 	case MONO_TYPE_VOID:
2151 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2154 	case MONO_TYPE_BOOLEAN:
2157 	case MONO_TYPE_CHAR:
2160 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2164 	case MONO_TYPE_FNPTR:
2165 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2166 	case MONO_TYPE_CLASS:
2167 	case MONO_TYPE_STRING:
2168 	case MONO_TYPE_OBJECT:
2169 	case MONO_TYPE_SZARRAY:
2170 	case MONO_TYPE_ARRAY:
2171 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2174 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2177 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2179 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2181 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2182 	case MONO_TYPE_VALUETYPE:
/* Enums re-enter the switch with their underlying base type. */
2183 if (type->data.klass->enumtype) {
2184 type = mono_class_enum_basetype (type->data.klass);
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2188 	case MONO_TYPE_TYPEDBYREF:
2189 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2190 	case MONO_TYPE_GENERICINST:
2191 type = &type->data.generic_class->container_class->byval_arg;
2194 	case MONO_TYPE_MVAR:
/* Gsharedvt: type variables that are vtypes return through a vtype call. */
2196 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2198 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2204 * target_type_is_incompatible:
2205 * @cfg: MonoCompile context
2207 * Check that the item @arg on the evaluation stack can be stored
2208 * in the target type (can be a local, or field, etc).
2209 * The cfg arg can be used to check if we need verification or just
2212 * Returns: non-0 value if arg can't be stored on a target.
2215 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
/* Contract documented in the comment block above: return non-0 when the stack
 * item ARG cannot be stored into a location of type TARGET. */
2217 	MonoType *simple_type;
2220 	target = mini_replace_type (target);
2221 	if (target->byref) {
2222 /* FIXME: check that the pointed to types match */
2223 if (arg->type == STACK_MP)
2224 return arg->klass != mono_class_from_mono_type (target);
2225 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-resolved) type. */
2230 	simple_type = mono_type_get_underlying_type (target);
2231 	switch (simple_type->type) {
2232 	case MONO_TYPE_VOID:
2236 	case MONO_TYPE_BOOLEAN:
2239 	case MONO_TYPE_CHAR:
/* Small ints accept any 32-bit or native-int stack value. */
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2246 /* STACK_MP is needed when setting pinned locals */
2247 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2252 	case MONO_TYPE_FNPTR:
2254 	 * Some opcodes like ldloca return 'transient pointers' which can be stored
2255 	 * in native int. (#688008).
2257 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2260 	case MONO_TYPE_CLASS:
2261 	case MONO_TYPE_STRING:
2262 	case MONO_TYPE_OBJECT:
2263 	case MONO_TYPE_SZARRAY:
2264 	case MONO_TYPE_ARRAY:
2265 if (arg->type != STACK_OBJ)
2267 /* FIXME: check type compatibility */
2271 if (arg->type != STACK_I8)
2275 if (arg->type != cfg->r4_stack_type)
2279 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
2282 	case MONO_TYPE_VALUETYPE:
2283 if (arg->type != STACK_VTYPE)
2285 klass = mono_class_from_mono_type (simple_type);
2286 if (klass != arg->klass)
2289 	case MONO_TYPE_TYPEDBYREF:
2290 if (arg->type != STACK_VTYPE)
2292 klass = mono_class_from_mono_type (simple_type);
2293 if (klass != arg->klass)
2296 	case MONO_TYPE_GENERICINST:
2297 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2298 if (arg->type != STACK_VTYPE)
2300 klass = mono_class_from_mono_type (simple_type);
2301 if (klass != arg->klass)
2305 if (arg->type != STACK_OBJ)
2307 /* FIXME: check type compatibility */
/* Shared generic code: type variables are either vtypes or objects. */
2311 	case MONO_TYPE_MVAR:
2312 g_assert (cfg->generic_sharing_context);
2313 if (mini_type_var_is_vt (cfg, simple_type)) {
2314 if (arg->type != STACK_VTYPE)
2317 if (arg->type != STACK_OBJ)
2322 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2328 * Prepare arguments for passing to a function call.
2329 * Return a non-zero value if the arguments can't be passed to the given
2331 * The type checks are not yet complete and some conversions may need
2332 * casts on 32 or 64 bit architectures.
2334 * FIXME: implement this using target_type_is_incompatible ()
2337 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
/* Contract documented in the comment block above: return non-zero when ARGS
 * cannot be passed to a callee with signature SIG. */
2339 	MonoType *simple_type;
/* Instance calls: the receiver must be an object or pointer-like value. */
2343 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2347 	for (i = 0; i < sig->param_count; ++i) {
2348 if (sig->params [i]->byref) {
2349 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2353 simple_type = sig->params [i];
2354 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2356 switch (simple_type->type) {
2357 case MONO_TYPE_VOID:
2362 case MONO_TYPE_BOOLEAN:
2365 case MONO_TYPE_CHAR:
2368 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2374 case MONO_TYPE_FNPTR:
2375 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2378 case MONO_TYPE_CLASS:
2379 case MONO_TYPE_STRING:
2380 case MONO_TYPE_OBJECT:
2381 case MONO_TYPE_SZARRAY:
2382 case MONO_TYPE_ARRAY:
2383 if (args [i]->type != STACK_OBJ)
2388 if (args [i]->type != STACK_I8)
2392 if (args [i]->type != cfg->r4_stack_type)
2396 if (args [i]->type != STACK_R8)
/* Enums re-enter the switch with their underlying base type. */
2399 case MONO_TYPE_VALUETYPE:
2400 if (simple_type->data.klass->enumtype) {
2401 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2404 if (args [i]->type != STACK_VTYPE)
2407 case MONO_TYPE_TYPEDBYREF:
2408 if (args [i]->type != STACK_VTYPE)
2411 case MONO_TYPE_GENERICINST:
/* Generic instances re-enter the switch via the generic type definition. */
2412 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2415 case MONO_TYPE_MVAR:
2417 if (args [i]->type != STACK_VTYPE)
2421 g_error ("unknown type 0x%02x in check_call_signature",
2429 callvirt_to_call (int opcode)
/* Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the return statements between cases are elided in this
 * excerpt; each case maps to the matching OP_*CALL opcode. */
2432 	case OP_CALL_MEMBASE:
2434 	case OP_VOIDCALL_MEMBASE:
2436 	case OP_FCALL_MEMBASE:
2438 	case OP_RCALL_MEMBASE:
2440 	case OP_VCALL_MEMBASE:
2442 	case OP_LCALL_MEMBASE:
2445 g_assert_not_reached ();
2451 /* Either METHOD or IMT_ARG needs to be set */
2453 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT (interface method table) argument to be passed to
 * CALL. The value is IMT_ARG when given, otherwise a constant for METHOD.
 * LLVM and JIT paths differ in how the register is attached to the call.
 */
2457 	if (COMPILE_LLVM (cfg)) {
2458 method_reg = alloc_preg (cfg);
2461 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2462 		} else if (cfg->compile_aot) {
2463 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2466 MONO_INST_NEW (cfg, ins, OP_PCONST);
2467 ins->inst_p0 = method;
2468 ins->dreg = method_reg;
2469 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM handles the calling convention itself; just record the vreg. */
2473 		call->imt_arg_reg = method_reg;
2475 #ifdef MONO_ARCH_IMT_REG
2476 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2478 	/* Need this to keep the IMT arg alive */
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: pass via the arch IMT register, or an arch-specific hook. */
2484 #ifdef MONO_ARCH_IMT_REG
2485 	method_reg = alloc_preg (cfg);
2488 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2489 	} else if (cfg->compile_aot) {
2490 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2493 MONO_INST_NEW (cfg, ins, OP_PCONST);
2494 ins->inst_p0 = method;
2495 ins->dreg = method_reg;
2496 MONO_ADD_INS (cfg->cbb, ins);
2499 	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2501 	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from mempool MP and initialize it
 * from IP/TYPE/TARGET. Lifetime is tied to the mempool (no free needed).
 * NOTE(review): the assignments of ji->ip and ji->type, and the return,
 * are in lines missing from this extract.
 */
2505 static MonoJumpInfo *
2506 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2508 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2512 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Return the generic-context-usage flags for KLASS, but only when the
 * current compile does generic sharing; presumably returns 0 otherwise
 * (the else branch is not visible in this extract — confirm).
 */
2518 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2520 if (cfg->generic_sharing_context)
2521 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Method-level counterpart of mini_class_check_context_used (): queries
 * context usage for METHOD only under generic sharing; presumably 0
 * otherwise (fall-through return not visible in this extract).
 */
2527 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2529 if (cfg->generic_sharing_context)
2530 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 * Decide whether a call to CMETHOD must pass a vtable pointer and/or a
 * method runtime generic context (mrgctx) as a hidden argument.
 *  - vtable: for static/valuetype methods of generic classes whose code
 *    might be shared (sharing enabled for the class and its context is
 *    sharable) and which do NOT have their own method instantiation.
 *  - mrgctx: for methods with a method_inst (generic methods) that are
 *    sharable, or under gsharedvt when the signature is gsharedvt.
 * The two are mutually exclusive (g_assert (!pass_vtable)).
 * Results are written through the optional out parameters.
 */
2536 * check_method_sharing:
2538 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2541 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2543 gboolean pass_vtable = FALSE;
2544 gboolean pass_mrgctx = FALSE;
2546 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2547 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2548 gboolean sharable = FALSE;
2550 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2553 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2554 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2555 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2557 sharable = sharing_enabled && context_sharable;
2561 * Pass vtable iff target method might
2562 * be shared, which means that sharing
2563 * is enabled for its class and its
2564 * context is sharable (and it's not a
2567 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2571 if (mini_method_get_context (cmethod) &&
2572 mini_method_get_context (cmethod)->method_inst) {
2573 g_assert (!pass_vtable);	/* vtable and mrgctx are mutually exclusive */
2575 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2578 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2579 MonoGenericContext *context = mini_method_get_context (cmethod);
2580 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2582 if (sharing_enabled && context_sharable)
2584 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2589 if (out_pass_vtable)
2590 *out_pass_vtable = pass_vtable;
2591 if (out_pass_mrgctx)
2592 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Core helper that builds a MonoCallInst for SIG/ARGS and lets the
 * backend lower the argument passing. Flags select the call flavor:
 * CALLI (indirect), VIRTUAL, TAIL, RGCTX (hidden rgctx arg present),
 * UNBOX_TRAMPOLINE (callee needs the unbox trampoline).
 * Highlights:
 *  - tail calls are emitted as OP_TAILCALL after an instrumented leave;
 *  - vtype returns either reuse cfg->vret_addr or allocate a temp whose
 *    address is produced via OP_OUTARG_VTRETADDR (see the long comment
 *    below for why a dedicated opcode is used instead of LDADDR);
 *  - under soft-float, R4 arguments are converted via the
 *    mono_fload_r4_arg icall *before* the call sequence, because doing
 *    it mid-sequence would clobber the outgoing registers/stack;
 *  - finally the backend (LLVM or mono_arch_emit_call) lowers the call
 *    and cfg->param_area / MONO_CFG_HAS_CALLS are updated.
 * Interior lines (declarations, some branch bodies, the return) are
 * missing from this extract.
 */
2595 inline static MonoCallInst *
2596 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2597 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2601 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2606 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2608 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2610 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2613 call->signature = sig;
2614 call->rgctx_reg = rgctx;
2615 sig_ret = mini_replace_type (sig->ret);
2617 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2620 if (mini_type_is_vtype (cfg, sig_ret)) {
2621 call->vret_var = cfg->vret_addr;
2622 //g_assert_not_reached ();
2624 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2625 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2628 temp->backend.is_pinvoke = sig->pinvoke;
2631 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2632 * address of return value to increase optimization opportunities.
2633 * Before vtype decomposition, the dreg of the call ins itself represents the
2634 * fact the call modifies the return value. After decomposition, the call will
2635 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2636 * will be transformed into an LDADDR.
2638 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2639 loada->dreg = alloc_preg (cfg);
2640 loada->inst_p0 = temp;
2641 /* We reference the call too since call->dreg could change during optimization */
2642 loada->inst_p1 = call;
2643 MONO_ADD_INS (cfg->cbb, loada);
2645 call->inst.dreg = temp->dreg;
2647 call->vret_var = loada;
2648 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2649 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2651 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2652 if (COMPILE_SOFT_FLOAT (cfg)) {
2654 * If the call has a float argument, we would need to do an r8->r4 conversion using
2655 * an icall, but that cannot be done during the call sequence since it would clobber
2656 * the call registers + the stack. So we do it before emitting the call.
2658 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2660 MonoInst *in = call->args [i];
2662 if (i >= sig->hasthis)
2663 t = sig->params [i - sig->hasthis];
2665 t = &mono_defaults.int_class->byval_arg;
2666 t = mono_type_get_underlying_type (t);
2668 if (!t->byref && t->type == MONO_TYPE_R4) {
2669 MonoInst *iargs [1];
2673 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2675 /* The result will be in an int vreg */
2676 call->args [i] = conv;
2682 call->need_unbox_trampoline = unbox_trampoline;
2685 if (COMPILE_LLVM (cfg))
2686 mono_llvm_emit_call (cfg, call);
2688 mono_arch_emit_call (cfg, call);
2690 mono_arch_emit_call (cfg, call);
2693 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2694 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the hidden runtime-generic-context argument to CALL. When the
 * architecture dedicates a register (MONO_ARCH_RGCTX_REG) the value is
 * pinned there; otherwise the vreg is recorded in call->rgctx_arg_reg
 * (presumably for the LLVM backend — the #else/#endif structure is not
 * fully visible in this extract).
 */
2700 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2702 #ifdef MONO_ARCH_RGCTX_REG
2703 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2704 cfg->uses_rgctx_reg = TRUE;
2705 call->rgctx_reg = TRUE;
2707 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. Optionally
 * forwards an IMT argument and/or a hidden rgctx argument.
 * For pinvoke wrappers with cfg->check_pinvoke_callconv set, the stack
 * pointer is captured before the call (OP_GET_SP into
 * cfg->stack_inbalance_var) and compared after it; a mismatch restores
 * SP and throws ExecutionEngineException — this catches native callees
 * invoked with the wrong calling convention.
 * Interior lines (declarations, some closing braces) are missing from
 * this extract.
 */
2714 inline static MonoInst*
2715 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2720 gboolean check_sp = FALSE;
2722 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2723 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2725 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2730 rgctx_reg = mono_alloc_preg (cfg);
2731 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2735 if (!cfg->stack_inbalance_var)
2736 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2738 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2739 ins->dreg = cfg->stack_inbalance_var->dreg;
2740 MONO_ADD_INS (cfg->cbb, ins);
2743 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2745 call->inst.sreg1 = addr->dreg;	/* the indirect target address */
2748 emit_imt_argument (cfg, call, NULL, imt_arg);
2750 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2755 sp_reg = mono_alloc_preg (cfg);
2757 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2759 MONO_ADD_INS (cfg->cbb, ins);
2761 /* Restore the stack so we don't crash when throwing the exception */
2762 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2763 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2764 MONO_ADD_INS (cfg->cbb, ins);
2766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2767 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2771 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2773 return (MonoInst*)call;
2777 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2780 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2782 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual / tail / remoting / rgctx-carrying) call to
 * METHOD with SIG and ARGS. THIS != NULL selects a virtual call.
 * Major paths, as visible in this extract:
 *  - string ctors get their signature rewritten to return string;
 *  - possibly-remote calls under generic sharing are routed through an
 *    rgctx-fetched remoting-invoke-with-check wrapper via mono_emit_calli;
 *  - delegate Invoke on multicastdelegate subclasses is lowered to a
 *    direct call through delegate->invoke_impl, plus a dummy use of the
 *    delegate to keep it alive (see the in-body comment re #667921);
 *  - non-virtual or sealed methods are devirtualized with
 *    callvirt_to_call () after a this-null check;
 *  - otherwise the vtable is loaded and either an IMT slot (interfaces),
 *    a plain vtable slot, or a generic-virtual IMT dispatch is used.
 * Interior lines (declarations, braces, some branches) are missing
 * from this extract — do not assume the visible lines are contiguous.
 */
2785 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2786 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2788 #ifndef DISABLE_REMOTING
2789 gboolean might_be_remote = FALSE;
2791 gboolean virtual = this != NULL;
2792 gboolean enable_for_aot = TRUE;
2796 gboolean need_unbox_trampoline;
2799 sig = mono_method_signature (method);
2802 rgctx_reg = mono_alloc_preg (cfg);
2803 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2806 if (method->string_ctor) {
2807 /* Create the real signature */
2808 /* FIXME: Cache these */
2809 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2810 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2815 context_used = mini_method_check_context_used (cfg, method);
2817 #ifndef DISABLE_REMOTING
2818 might_be_remote = this && sig->hasthis &&
2819 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2820 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2822 if (might_be_remote && context_used) {
2825 g_assert (cfg->generic_sharing_context);
2827 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2829 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2833 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2835 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2837 #ifndef DISABLE_REMOTING
2838 if (might_be_remote)
2839 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2842 call->method = method;
2843 call->inst.flags |= MONO_INST_HAS_METHOD;
2844 call->inst.inst_left = this;
2845 call->tail_call = tail;
2848 int vtable_reg, slot_reg, this_reg;
2851 this_reg = this->dreg;
2853 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2854 MonoInst *dummy_use;
2856 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2858 /* Make a call to delegate->invoke_impl */
2859 call->inst.inst_basereg = this_reg;
2860 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2861 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2863 /* We must emit a dummy use here because the delegate trampoline will
2864 replace the 'this' argument with the delegate target making this activation
2865 no longer a root for the delegate.
2866 This is an issue for delegates that target collectible code such as dynamic
2867 methods of GC'able assemblies.
2869 For a test case look into #667921.
2871 FIXME: a dummy use is not the best way to do it as the local register allocator
2872 will put it on a caller save register and spil it around the call.
2873 Ideally, we would either put it on a callee save register or only do the store part.
2875 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2877 return (MonoInst*)call;
2880 if ((!cfg->compile_aot || enable_for_aot) &&
2881 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2882 (MONO_METHOD_IS_FINAL (method) &&
2883 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2884 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2886 * the method is not virtual, we just need to ensure this is not null
2887 * and then we can call the method directly.
2889 #ifndef DISABLE_REMOTING
2890 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2892 * The check above ensures method is not gshared, this is needed since
2893 * gshared methods can't have wrappers.
2895 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2899 if (!method->string_ctor)
2900 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2902 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2903 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2905 * the method is virtual, but we can statically dispatch since either
2906 * it's class or the method itself are sealed.
2907 * But first we need to ensure it's not a null reference.
2909 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2911 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2913 vtable_reg = alloc_preg (cfg);
2914 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2915 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2918 guint32 imt_slot = mono_method_get_imt_slot (method);
2919 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 slot_reg = vtable_reg;
2921 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;	/* IMT slots live below the vtable */
2923 if (slot_reg == -1) {
2924 slot_reg = alloc_preg (cfg);
2925 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2926 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: emit a non-tail call to METHOD using its own
 * signature, with no IMT or rgctx arguments.
 */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the line storing FUNC into the call (presumably
 * call->fptr = func) is missing from this extract — confirm.
 */
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for FUNC: look up its icall
 * info by address and call through its wrapper with the icall's
 * signature.
 */
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 * Emit a call whose target is resolved later via the patch machinery:
 * a MonoJumpInfo is created for PATCH_TYPE/DATA, registered in
 * cfg->abs_patches, and the ji pointer itself is passed as the "address"
 * (fptr_is_patch marks it so the ABS resolver substitutes the real
 * target at code-emission time).
 */
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * After a pinvoke (or LLVM) call returning a small integer, emit an
 * explicit sign/zero-extension of the result, because native code may
 * leave the upper bits of sub-register-sized return values
 * uninitialized. The widened instruction replaces the call result
 * (presumably returned to the caller — the trailing return lines are
 * missing from this extract).
 */
3009 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3011 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3012 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3016 * Native code might return non register sized integers
3017 * without initializing the upper bits.
3019 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3020 case OP_LOADI1_MEMBASE:
3021 widen_op = OP_ICONV_TO_I1;
3023 case OP_LOADU1_MEMBASE:
3024 widen_op = OP_ICONV_TO_U1;
3026 case OP_LOADI2_MEMBASE:
3027 widen_op = OP_ICONV_TO_I2;
3029 case OP_LOADU2_MEMBASE:
3030 widen_op = OP_ICONV_TO_U2;
3036 if (widen_op != -1) {
3037 int dreg = alloc_preg (cfg);
3040 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3041 widen->type = ins->type;	/* keep the eval-stack type of the original result */
/*
 * get_memcpy_method:
 * Return (and lazily cache) the managed String.memcpy(3) helper from
 * corlib; aborts if the running corlib is too old to provide it.
 * NOTE(review): the cache is a function-local static with no locking
 * visible here — presumably benign double-initialization is acceptable.
 */
3051 get_memcpy_method (void)
3053 static MonoMethod *memcpy_method = NULL;
3054 if (!memcpy_method) {
3055 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3057 g_error ("Old corlib found. Install a new one");
3059 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively build a bitmap (one bit per pointer-sized slot, relative
 * to OFFSET) of which slots of KLASS hold managed references, so the
 * caller can issue write barriers only for those slots. Static fields
 * are skipped; valuetype field offsets are adjusted for the missing
 * MonoObject header; reference-bearing nested structs recurse.
 */
3063 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3065 MonoClassField *field;
3066 gpointer iter = NULL;
3068 while ((field = mono_class_get_fields (klass, &iter))) {
3071 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3073 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3074 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3075 g_assert ((foffset % SIZEOF_VOID_P) == 0);	/* references must be pointer-aligned */
3076 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3078 MonoClass *field_class = mono_class_from_mono_type (field->type);
3079 if (field_class->has_references)
3080 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for the store of VALUE through PTR. No-op
 * unless cfg->gen_write_barriers. Fast paths, in order of preference:
 *  1. architecture-specific card-table barrier opcode
 *     (OP_CARD_TABLE_WBARRIER) when available and not AOT/LLVM;
 *  2. inline card marking: shift the address by the card shift, mask,
 *     add the card-table base (AOT const or pconst) and store 1 into
 *     the card byte;
 *  3. fallback: call the runtime's generic write-barrier method.
 * Ends with a dummy use of VALUE to keep it alive across the barrier.
 */
3086 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3088 int card_table_shift_bits;
3089 gpointer card_table_mask;
3091 MonoInst *dummy_use;
3092 int nursery_shift_bits;
3093 size_t nursery_size;
3094 gboolean has_card_table_wb = FALSE;
3096 if (!cfg->gen_write_barriers)
3099 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3101 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3103 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3104 has_card_table_wb = TRUE;
3107 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3110 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3111 wbarrier->sreg1 = ptr->dreg;
3112 wbarrier->sreg2 = value->dreg;
3113 MONO_ADD_INS (cfg->cbb, wbarrier);
3114 } else if (card_table) {
3115 int offset_reg = alloc_preg (cfg);
3116 int card_reg = alloc_preg (cfg);
3119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3120 if (card_table_mask)
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3123 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3124 * IMM's larger than 32bits.
3126 if (cfg->compile_aot) {
3127 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3129 MONO_INST_NEW (cfg, ins, OP_PCONST);
3130 ins->inst_p0 = card_table;
3131 ins->dreg = card_reg;
3132 MONO_ADD_INS (cfg->cbb, ins);
3135 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3136 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);	/* mark the card dirty */
3138 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3139 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3142 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Emit a write-barrier-aware copy of a valuetype of KLASS (dest/src
 * addresses in iargs[0]/iargs[1]). Bails out (presumably returning
 * FALSE — return lines are missing from this extract) when alignment
 * is below pointer size or the type exceeds 32 pointer slots (the wb
 * bitmap is a 32-bit mask). Large copies (> 5 slots) are delegated to
 * the mono_gc_wbarrier_value_copy_bitmap icall with the precomputed
 * bitmap; small copies are unrolled pointer-by-pointer with an
 * emit_write_barrier () per reference slot, then the sub-pointer tail
 * (which cannot hold references) is copied with plain 4/2/1-byte
 * load/store pairs.
 */
3146 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3148 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3149 unsigned need_wb = 0;
3154 /*types with references can't have alignment smaller than sizeof(void*) */
3155 if (align < SIZEOF_VOID_P)
3158 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3159 if (size > 32 * SIZEOF_VOID_P)
3162 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3164 /* We don't unroll more than 5 stores to avoid code bloat. */
3165 if (size > 5 * SIZEOF_VOID_P) {
3166 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3167 size += (SIZEOF_VOID_P - 1);
3168 size &= ~(SIZEOF_VOID_P - 1);
3170 EMIT_NEW_ICONST (cfg, iargs [2], size);
3171 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3172 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3176 destreg = iargs [0]->dreg;
3177 srcreg = iargs [1]->dreg;
3180 dest_ptr_reg = alloc_preg (cfg);
3181 tmp_reg = alloc_preg (cfg);
3184 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3186 while (size >= SIZEOF_VOID_P) {
3187 MonoInst *load_inst;
3188 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3189 load_inst->dreg = tmp_reg;
3190 load_inst->inst_basereg = srcreg;
3191 load_inst->inst_offset = offset;
3192 MONO_ADD_INS (cfg->cbb, load_inst);
3194 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3197 emit_write_barrier (cfg, iargs [0], load_inst);	/* barrier only for slots flagged in need_wb (guard not visible here) */
3199 offset += SIZEOF_VOID_P;
3200 size -= SIZEOF_VOID_P;
3203 /*tmp += sizeof (void*)*/
3204 if (size >= SIZEOF_VOID_P) {
3205 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3206 MONO_ADD_INS (cfg->cbb, iargs [0]);
3210 /* Those cannot be references since size < sizeof (void*) */
3212 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3213 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3219 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3226 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3227 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 * Copy a valuetype of @klass from *src->dreg to *dest->dreg.
 * Strategy, as visible in this extract:
 *  - gsharedvt classes fetch size and a memcpy helper from the rgctx;
 *  - if write barriers are needed (type has references, or gsharedvt)
 *    and the destination is not the stack, try the intrinsified
 *    mono_emit_wb_aware_memcpy, else call the value-copy icall
 *    (generic-shared or plain) with a class argument that is an rgctx
 *    fetch / classconst / pconst depending on context/AOT;
 *  - otherwise small fixed-size copies are inlined via mini_emit_memcpy
 *    and anything else calls the managed memcpy helper (indirectly via
 *    the gsharedvt memcpy_ins when present).
 * @native selects native (marshalled) layout/size; callers guarantee
 * native structs contain no references.
 */
3236 * Emit code to copy a valuetype of type @klass whose address is stored in
3237 * @src->dreg to memory whose address is stored at @dest->dreg.
3240 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3242 MonoInst *iargs [4];
3243 int context_used, n;
3245 MonoMethod *memcpy_method;
3246 MonoInst *size_ins = NULL;
3247 MonoInst *memcpy_ins = NULL;
3251 * This check breaks with spilled vars... need to handle it during verification anyway.
3252 * g_assert (klass && klass == src->klass && klass == dest->klass);
3255 if (mini_is_gsharedvt_klass (cfg, klass)) {
3257 context_used = mini_class_check_context_used (cfg, klass);
3258 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3259 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3263 n = mono_class_native_size (klass, &align);
3265 n = mono_class_value_size (klass, &align);
3267 /* if native is true there should be no references in the struct */
3268 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3269 /* Avoid barriers when storing to the stack */
3270 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3271 (dest->opcode == OP_LDADDR))) {
3277 context_used = mini_class_check_context_used (cfg, klass);
3279 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3280 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3282 } else if (context_used) {
3283 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3285 if (cfg->compile_aot) {
3286 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3288 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3289 mono_class_compute_gc_descriptor (klass);
3294 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3296 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3301 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3302 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3303 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3308 iargs [2] = size_ins;
3310 EMIT_NEW_ICONST (cfg, iargs [2], n);
3312 memcpy_method = get_memcpy_method ();
3314 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3316 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and lazily cache) the managed String.memset(3) helper from
 * corlib; aborts if the running corlib does not provide it. Mirrors
 * get_memcpy_method () above.
 */
3321 get_memset_method (void)
3323 static MonoMethod *memset_method = NULL;
3324 if (!memset_method) {
3325 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3327 g_error ("Old corlib found. Install a new one");
3329 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of KLASS at the address in
 * DEST (the CIL `initobj` lowering). Three paths:
 *  - gsharedvt classes: fetch the runtime size and a bzero helper from
 *    the rgctx and call it indirectly with corlib's bzero_aligned_1
 *    signature;
 *  - small types (<= 8 pointer-sizes): inline mini_emit_memset;
 *  - otherwise: call the managed memset helper with (dest, 0, n).
 */
3333 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3335 MonoInst *iargs [3];
3336 int n, context_used;
3338 MonoMethod *memset_method;
3339 MonoInst *size_ins = NULL;
3340 MonoInst *bzero_ins = NULL;
3341 static MonoMethod *bzero_method;
3343 /* FIXME: Optimize this for the case when dest is an LDADDR */
3345 mono_class_init (klass);
3346 if (mini_is_gsharedvt_klass (cfg, klass)) {
3347 context_used = mini_class_check_context_used (cfg, klass);
3348 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3349 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3351 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3352 g_assert (bzero_method);
3354 iargs [1] = size_ins;
3355 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3359 n = mono_class_value_size (klass, &align);
3361 if (n <= sizeof (gpointer) * 8) {
3362 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3365 memset_method = get_memset_method ();
3367 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3368 EMIT_NEW_ICONST (cfg, iargs [2], n);
3369 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context appropriate for
 * METHOD with CONTEXT_USED flags. Sources, in order:
 *  - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD): load the
 *    mrgctx from the vtable var;
 *  - static / valuetype methods: load the vtable from the vtable var
 *    (for inflated generic methods, first dereference the mrgctx's
 *    class_vtable field);
 *  - instance methods: load `this` and read its object vtable.
 * Requires generic sharing to be active. Several return statements and
 * branch bodies are missing from this extract.
 */
3374 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3376 MonoInst *this = NULL;
3378 g_assert (cfg->generic_sharing_context);
3380 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3381 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3382 !method->klass->valuetype)
3383 EMIT_NEW_ARGLOAD (cfg, this, 0);
3385 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3386 MonoInst *mrgctx_loc, *mrgctx_var;
3389 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3391 mrgctx_loc = mono_get_vtable_var (cfg);
3392 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3395 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3396 MonoInst *vtable_loc, *vtable_var;
3400 vtable_loc = mono_get_vtable_var (cfg);
3401 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3403 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3404 MonoInst *mrgctx_var = vtable_var;
3407 vtable_reg = alloc_preg (cfg);
3408 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3409 vtable_var->type = STACK_PTR;
3417 vtable_reg = alloc_preg (cfg);
3418 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from mempool MP) an rgctx-fetch descriptor: which METHOD's
 * context to search, whether it is an mrgctx, the patch (PATCH_TYPE +
 * PATCH_DATA) identifying the wanted item, and the INFO_TYPE of the
 * slot. The return statement is not visible in this extract.
 */
3423 static MonoJumpInfoRgctxEntry *
3424 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3426 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3427 res->method = method;
3428 res->in_mrgctx = in_mrgctx;
3429 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3430 res->data->type = patch_type;
3431 res->data->data.target = patch_data;
3432 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * against RGCTX at runtime, using the ABS patch machinery.
 */
3437 static inline MonoInst*
3438 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3440 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR that fetches the RGCTX_TYPE item for KLASS from the current
 * method's runtime generic context.
 */
3444 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3445 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3447 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3448 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3450 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR that fetches the RGCTX_TYPE item keyed by SIG from the
 * current method's runtime generic context.
 */
3454 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3455 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3457 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3458 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3460 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR that fetches the RGCTX_TYPE item for a gsharedvt call
 * (SIG + CMETHOD pair, packed into a mempool-allocated
 * MonoJumpInfoGSharedVtCall) from the runtime generic context.
 */
3464 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3465 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3467 MonoJumpInfoGSharedVtCall *call_info;
3468 MonoJumpInfoRgctxEntry *entry;
3471 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3472 call_info->sig = sig;
3473 call_info->method = cmethod;
3475 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3476 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR that fetches the gsharedvt per-method info block (INFO) for
 * CMETHOD from the runtime generic context
 * (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO slot).
 */
3483 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3484 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3486 MonoJumpInfoRgctxEntry *entry;
3489 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3490 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3492 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 * When CONTEXT_USED == 0, emit a plain constant for the requested
 * property of CMETHOD (method const or method-rgctx const); otherwise
 * fetch it from the runtime generic context. Only
 * MONO_RGCTX_INFO_METHOD and MONO_RGCTX_INFO_METHOD_RGCTX are valid in
 * the constant path; anything else asserts.
 */
3496 * emit_get_rgctx_method:
3498 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3499 * normal constants, else emit a load from the rgctx.
3502 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3503 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3505 if (!context_used) {
3508 switch (rgctx_type) {
3509 case MONO_RGCTX_INFO_METHOD:
3510 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3512 case MONO_RGCTX_INFO_METHOD_RGCTX:
3513 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3516 g_assert_not_reached ();
3519 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3520 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3522 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR that fetches the RGCTX_TYPE item keyed by FIELD from the
 * current method's runtime generic context.
 */
3527 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3528 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3530 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3531 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3533 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Find or allocate the slot index in cfg->gsharedvt_info for the
 * (RGCTX_TYPE, DATA) pair. Existing non-LOCAL_OFFSET entries are
 * deduplicated; otherwise the entry array is grown (doubling, starting
 * at 16, mempool-allocated so old storage is simply abandoned) and a
 * new template entry is appended. The return of the index is not
 * visible in this extract.
 */
3537 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3539 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3540 MonoRuntimeGenericContextInfoTemplate *template;
3545 for (i = 0; i < info->num_entries; ++i) {
3546 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3548 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3552 if (info->num_entries == info->count_entries) {
3553 MonoRuntimeGenericContextInfoTemplate *new_entries;
3554 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3556 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3558 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3559 info->entries = new_entries;
3560 info->count_entries = new_count_entries;
3563 idx = info->num_entries;
3564 template = &info->entries [idx];
3565 template->info_type = rgctx_type;
3566 template->data = data;
3568 info->num_entries ++;
3574 * emit_get_gsharedvt_info:
3576 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3579 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or allocate) the template slot for (DATA, RGCTX_TYPE). */
3584 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3585 /* Load info->entries [idx] */
3586 dreg = alloc_preg (cfg);
/* One pointer-sized load from the runtime info blob at a compile-time-known offset. */
3587 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on a class (its byval type). */
3593 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3595 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3599 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS. The vtable
 * argument is looked up through the rgctx in shared code, or embedded as a
 * constant otherwise.
 */
3602 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3604 MonoInst *vtable_arg;
3608 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the concrete vtable is only known at run time. */
3611 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3612 klass, MONO_RGCTX_INFO_VTABLE);
3614 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3618 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a signature where the vtable is passed as an ordinary argument. */
3621 if (COMPILE_LLVM (cfg))
3622 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3624 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3625 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the fixed register the trampoline expects on this arch. */
3626 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3627 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP, but only when sequence
 * points are enabled and METHOD is the method actually being compiled (i.e.
 * not an inlined body). INTR_LOC marks an interruptible location and
 * NONEMPTY_STACK flags that the IL evaluation stack is not empty here.
 */
3634 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3638 if (cfg->gen_seq_points && cfg->method == method) {
3639 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3641 ins->flags |= MONO_INST_NONEMPTY_STACK;
3642 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When the --debug=casts option is enabled, record the object's dynamic
 * class and the target KLASS into the thread's MonoJitTlsData
 * (class_cast_from / class_cast_to) so a failing cast can report both types.
 * The bookkeeping is skipped for a null OBJ_REG; if OUT_BBLOCK is non-NULL it
 * presumably receives the current basic block on return (assignment visible
 * below) -- TODO confirm the NULL handling in the elided lines.
 */
3647 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3649 if (mini_get_debug_options ()->better_cast_details) {
3650 int vtable_reg = alloc_preg (cfg);
3651 int klass_reg = alloc_preg (cfg);
3652 MonoBasicBlock *is_null_bb = NULL;
3654 int to_klass_reg, context_used;
/* Branch around the recording code when the object is null. */
3657 NEW_BBLOCK (cfg, is_null_bb);
3659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Per-thread MonoJitTlsData is reached through a TLS intrinsic; not every backend has one. */
3663 tls_get = mono_get_jit_tls_intrinsic (cfg);
3665 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3669 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from <- the object's dynamic class (obj->vtable->klass). */
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to <- the target class: via rgctx in shared code, constant otherwise. */
3675 context_used = mini_class_check_context_used (cfg, klass);
3677 MonoInst *class_ins;
3679 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3680 to_klass_reg = class_ins->dreg;
3682 to_klass_reg = alloc_preg (cfg);
3683 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3688 MONO_START_BB (cfg, is_null_bb);
3690 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Undo save_cast_details () after a cast sequence: clear the recorded
 * source class in the thread's MonoJitTlsData so stale data is not reported
 * by a later, unrelated cast failure.
 */
3696 reset_cast_details (MonoCompile *cfg)
3698 /* Reset the variables holding the cast details */
3699 if (mini_get_debug_options ()->better_cast_details) {
3700 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3702 MONO_ADD_INS (cfg->cbb, tls_get);
3703 /* It is enough to reset the from field */
3704 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3709 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ's exact type is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * the compilation mode: MonoClass pointers under MONO_OPT_SHARED, vtables
 * fetched through the rgctx in shared-context code, and vtable constants
 * (or AOT vtable patches) in normal code.
 */
3712 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3714 int vtable_reg = alloc_preg (cfg);
3717 context_used = mini_class_check_context_used (cfg, array_class);
3719 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also performs the implicit null check on OBJ. */
3721 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared runtime: compare MonoClass pointers (vtables are per-domain). */
3723 if (cfg->opt & MONO_OPT_SHARED) {
3724 int class_reg = alloc_preg (cfg);
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3726 if (cfg->compile_aot) {
/* AOT cannot embed the raw class pointer; use a CLASSCONST patch. */
3727 int klass_reg = alloc_preg (cfg);
3728 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: the expected vtable comes from the rgctx. */
3733 } else if (context_used) {
3734 MonoInst *vtable_ins;
3736 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3737 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3739 if (cfg->compile_aot) {
3743 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3745 vt_reg = alloc_preg (cfg);
3746 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3750 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* JIT: the vtable address is known, compare against the immediate. */
3752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3756 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3758 reset_cast_details (cfg);
3762 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3763 * generic code is generated.
/* Calls the runtime-provided Nullable<T>.Unbox () method on VAL. */
3766 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3768 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3771 MonoInst *rgctx, *addr;
/* Shared generic code: fetch the method's code address from the rgctx and
 * make an indirect call, passing the rgctx as the hidden argument. */
3773 /* FIXME: What if the class is shared? We might not
3774 have to get the address of the method from the
3776 addr = emit_get_rgctx_method (cfg, context_used, method,
3777 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3779 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3781 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, passing the vtable if method sharing requires it. */
3783 gboolean pass_vtable, pass_mrgctx;
3784 MonoInst *rgctx_arg = NULL;
3786 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3787 g_assert (!pass_mrgctx);
3790 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3793 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3796 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the standard unbox sequence for KLASS on the object at SP [0]:
 * null-check (via faulting vtable load), verify the object is not an array
 * (rank == 0) and that its element class matches KLASS's, then return an
 * instruction holding the address just past the MonoObject header where the
 * unboxed value lives.
 */
3801 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3805 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3806 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3807 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3808 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3810 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3811 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3814 /* FIXME: generics */
3815 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type. */
3818 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3819 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class obtained from the rgctx. */
3825 MonoInst *element_class;
3827 /* This assertion is from the unboxcast insn */
3828 g_assert (klass->rank == 0);
3830 element_class = emit_get_rgctx_klass (cfg, context_used,
3831 klass->element_class, MONO_RGCTX_INFO_KLASS);
3833 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3834 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: direct class check, with cast details recorded for --debug=casts. */
3836 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3837 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3838 reset_cast_details (cfg);
/* Result: pointer to the value, i.e. obj + sizeof (MonoObject). */
3841 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3842 MONO_ADD_INS (cfg->cbb, add);
3843 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type variable, so whether T is a
 * vtype, a reference type or a Nullable is only known at run time. Branches
 * three ways on the MONO_RGCTX_INFO_CLASS_BOX_TYPE info (== 1 selects the
 * reference-type path, == 2 the nullable path, fall-through is the plain
 * vtype path), leaving in ADDR_REG an address from which the value can be
 * loaded uniformly. OUT_CBB receives the final basic block.
 */
3850 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3852 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3853 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3857 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check: obj = mono_object_castclass_unbox (obj, klass). */
3863 args [1] = klass_inst;
3866 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3868 NEW_BBLOCK (cfg, is_ref_bb);
3869 NEW_BBLOCK (cfg, is_nullable_bb);
3870 NEW_BBLOCK (cfg, end_bb);
3871 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3878 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3879 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the value sits right after the MonoObject header. */
3883 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3884 MONO_ADD_INS (cfg->cbb, addr);
3886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3889 MONO_START_BB (cfg, is_ref_bb);
3891 /* Save the ref to a temporary */
3892 dreg = alloc_ireg (cfg);
3893 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3894 addr->dreg = addr_reg;
3895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3896 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3899 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the runtime Nullable unbox helper through a
 * hand-built one-argument signature (object -> T). */
3902 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3903 MonoInst *unbox_call;
3904 MonoMethodSignature *unbox_sig;
3907 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3909 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3910 unbox_sig->ret = &klass->byval_arg;
3911 unbox_sig->param_count = 1;
3912 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3913 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3915 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3916 addr->dreg = addr_reg;
3919 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3922 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value through addr_reg. */
3925 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3927 *out_cbb = cfg->cbb;
3933 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an instance of KLASS (FOR_BOX indicates the object is
 * being created for a box operation, which may select a different managed
 * allocator). Picks between: the shared-runtime mono_object_new path, the
 * rgctx-driven shared-generic path, an AOT space-saving corlib helper, a GC
 * managed allocator, or a class-specific allocation function.
 */
3936 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3938 MonoInst *iargs [2];
/* --- shared generic context path --- */
3944 MonoInst *iargs [2];
/* gsharedvt classes have an unknown instance size at compile time. */
3945 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3947 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED needs the class, otherwise the vtable, from the rgctx. */
3949 if (cfg->opt & MONO_OPT_SHARED)
3950 rgctx_info = MONO_RGCTX_INFO_KLASS;
3952 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3953 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3955 if (cfg->opt & MONO_OPT_SHARED) {
3956 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3958 alloc_ftn = mono_object_new;
3961 alloc_ftn = mono_object_new_specific;
/* Prefer the inlined GC managed allocator when available. */
3964 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3965 if (known_instance_size)
3966 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
3967 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3970 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared paths --- */
3973 if (cfg->opt & MONO_OPT_SHARED) {
3974 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3975 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3977 alloc_ftn = mono_object_new;
3978 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3979 /* This happens often in argument checking code, eg. throw new FooException... */
3980 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3981 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3982 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3984 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3985 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: surface a TypeLoadException to the caller. */
3989 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3990 cfg->exception_ptr = klass;
3994 #ifndef MONO_CROSS_COMPILE
3995 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3998 if (managed_alloc) {
3999 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4000 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4001 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fallback: runtime allocation function; some variants take the
 * pointer-size-rounded instance size ("lw") as first argument. */
4003 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4005 guint32 lw = vtable->klass->instance_size;
4006 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4007 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4008 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4011 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4015 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4019 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL (a value of type KLASS), handling three shapes:
 * Nullable<T> (call the runtime Nullable Box method), gsharedvt type
 * variables (runtime three-way branch on vtype / reference / nullable),
 * and the plain case (allocate, then store the value after the header).
 * OUT_CBB receives the basic block that is current on return.
 */
4022 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4024 MonoInst *alloc, *ins;
4026 *out_cbb = cfg->cbb;
4028 if (mono_class_is_nullable (klass)) {
4029 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
/* Shared generic: indirect call through an rgctx-provided code address. */
4032 /* FIXME: What if the class is shared? We might not
4033 have to get the method address from the RGCTX. */
4034 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4035 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4036 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4038 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, passing the vtable if sharing requires it. */
4040 gboolean pass_vtable, pass_mrgctx;
4041 MonoInst *rgctx_arg = NULL;
4043 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4044 g_assert (!pass_mrgctx);
4047 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4050 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4053 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: T's kind is only known at run time; branch on CLASS_BOX_TYPE
 * (== 1 reference type, == 2 nullable, fall-through plain vtype). */
4057 if (mini_is_gsharedvt_klass (cfg, klass)) {
4058 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4059 MonoInst *res, *is_ref, *src_var, *addr;
4062 dreg = alloc_ireg (cfg);
4064 NEW_BBLOCK (cfg, is_ref_bb);
4065 NEW_BBLOCK (cfg, is_nullable_bb);
4066 NEW_BBLOCK (cfg, end_bb);
4067 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4068 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4069 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and copy the value after the MonoObject header. */
4075 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4078 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4079 ins->opcode = OP_STOREV_MEMBASE;
4081 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4082 res->type = STACK_OBJ;
4084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference path: boxing a ref is the identity; reload it through a var. */
4087 MONO_START_BB (cfg, is_ref_bb);
4088 addr_reg = alloc_ireg (cfg);
4090 /* val is a vtype, so has to load the value manually */
4091 src_var = get_vreg_to_inst (cfg, val->dreg);
4093 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4094 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4096 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: call Nullable<T>.Box through a hand-built signature. */
4099 MONO_START_BB (cfg, is_nullable_bb);
4102 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4103 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4105 MonoMethodSignature *box_sig;
4108 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4109 * construct that method at JIT time, so have to do things by hand.
4111 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4112 box_sig->ret = &mono_defaults.object_class->byval_arg;
4113 box_sig->param_count = 1;
4114 box_sig->params [0] = &klass->byval_arg;
4115 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4116 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4117 res->type = STACK_OBJ;
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4123 MONO_START_BB (cfg, end_bb);
4125 *out_cbb = cfg->cbb;
/* Plain case: allocate and store the value after the object header. */
4129 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4133 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return TRUE if KLASS is a generic instance (or, in shared code, an open
 * generic) that has at least one variant (co-/contra-variant) type parameter
 * instantiated with a reference type. Such casts need the full variance-aware
 * cast machinery instead of a simple class check.
 */
4140 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4143 MonoGenericContainer *container;
4144 MonoGenericInst *ginst;
4146 if (klass->generic_class) {
4147 container = klass->generic_class->container_class->generic_container;
4148 ginst = klass->generic_class->context.class_inst;
4149 } else if (klass->generic_container && context_used) {
4150 container = klass->generic_container;
4151 ginst = container->context.class_inst;
/* Scan only variant parameters; non-variant ones cannot affect cast behavior here. */
4156 for (i = 0; i < container->type_argc; ++i) {
4158 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4160 type = ginst->type_argv [i];
4161 if (mini_type_is_reference (cfg, type))
/* TRUE when an isinst/castclass against KLASS cannot be implemented as a
 * simple inline class-hierarchy walk: interfaces, arrays, nullables,
 * marshal-by-ref classes, sealed classes and generic type variables all
 * require a different (or cached) code path. */
4167 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper with ARGS
 * (object, klass, cache slot), recording cast details around the call so
 * --debug=casts can report a failure. OUT_BBLOCK receives the current
 * basic block on return.
 */
4170 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4172 MonoMethod *mono_castclass;
4175 mono_castclass = mono_marshal_get_castclass_with_cache ();
4177 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4178 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4179 reset_cast_details (cfg);
4180 *out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code front end for emit_castclass_with_cache (): builds the
 * argument array for casting OBJ to KLASS. The per-call-site cache slot is
 * either a fresh domain-allocated pointer (JIT) or a CASTCLASS_CACHE AOT
 * patch keyed by a unique (method_index, counter) id.
 */
4186 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4195 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4198 if (cfg->compile_aot) {
4199 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4200 cfg->castclass_cache_index ++;
4201 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4202 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4204 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4207 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4209 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4213 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure. Picks between the cached-cast wrapper
 * (variant generics / complex classes), an inlined marshal castclass
 * wrapper (MBR / interface, non-shared), and an inline null-check plus
 * class/interface hierarchy check. OUT_BB receives the final basic block;
 * INLINE_COSTS is incremented by the cost of any inlining done.
 */
4216 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4218 MonoBasicBlock *is_null_bb;
4219 int obj_reg = src->dreg;
4220 int vtable_reg = alloc_preg (cfg);
4222 MonoInst *klass_inst = NULL, *res;
4223 MonoBasicBlock *bblock;
4227 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic argument: must go through the cache-based wrapper. */
4229 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4230 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4231 (*inline_costs) += 2;
/* MBR class or interface (non-shared): inline the marshal castclass wrapper. */
4234 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4235 MonoMethod *mono_castclass;
4236 MonoInst *iargs [1];
4239 mono_castclass = mono_marshal_get_castclass (klass);
4242 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4243 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4244 iargs, ip, cfg->real_offset, TRUE, &bblock);
4245 reset_cast_details (cfg);
4246 CHECK_CFG_EXCEPTION;
4247 g_assert (costs > 0);
4249 cfg->real_offset += 5;
4251 (*inline_costs) += costs;
/* Shared code with a complex class: use the cache-based wrapper, cache slot from the rgctx. */
4260 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4261 MonoInst *cache_ins;
4263 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4268 /* klass - it's the second element of the cache entry*/
4269 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4272 args [2] = cache_ins;
4274 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4277 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass the cast. */
4280 NEW_BBLOCK (cfg, is_null_bb);
4282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4283 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4285 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4287 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4288 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4289 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4291 int klass_reg = alloc_preg (cfg);
4293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact pointer comparison suffices, no hierarchy walk. */
4295 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4296 /* the remoting code is broken, access the class for now */
4297 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4298 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4300 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4301 cfg->exception_ptr = klass;
4304 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4306 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4309 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4311 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4312 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4316 MONO_START_BB (cfg, is_null_bb);
4318 reset_cast_details (cfg);
4329 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the isinst opcode: test whether SRC is an instance of KLASS,
 * producing either the object itself (success / null input) or NULL. Complex
 * classes route through the cached isinst wrapper; otherwise an inline
 * three-block (success / false / end) sequence is emitted whose checks
 * depend on the shape of KLASS (interface, array, nullable, sealed, plain).
 */
4332 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4335 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4336 int obj_reg = src->dreg;
4337 int vtable_reg = alloc_preg (cfg);
4338 int res_reg = alloc_ireg_ref (cfg);
4339 MonoInst *klass_inst = NULL;
/* Complex class: use the cache-based isinst wrapper (cache slot via rgctx). */
4344 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4345 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4346 MonoInst *cache_ins;
4348 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4353 /* klass - it's the second element of the cache entry*/
4354 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4357 args [2] = cache_ins;
4359 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4362 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4365 NEW_BBLOCK (cfg, is_null_bb);
4366 NEW_BBLOCK (cfg, false_bb);
4367 NEW_BBLOCK (cfg, end_bb);
4369 /* Do the assignment at the beginning, so the other assignment can be if converted */
4370 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4371 ins->type = STACK_OBJ;
/* A null reference "is" any type: jump straight to the success block. */
4374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4377 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4379 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4380 g_assert (!context_used);
4381 /* the is_null_bb target simply copies the input register to the output */
4382 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4384 int klass_reg = alloc_preg (cfg);
/* Array class: check rank, then the element (cast) class. */
4387 int rank_reg = alloc_preg (cfg);
4388 int eclass_reg = alloc_preg (cfg);
4390 g_assert (!context_used);
4391 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4393 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes whose cast rules differ (object, Enum, enums). */
4396 if (klass->cast_class == mono_defaults.object_class) {
4397 int parent_reg = alloc_preg (cfg);
4398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4399 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4400 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4401 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4402 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4403 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4404 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4405 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4406 } else if (klass->cast_class == mono_defaults.enum_class) {
4407 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4408 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4409 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4410 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4412 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4413 /* Check that the object is a vector too */
4414 int bounds_reg = alloc_preg (cfg);
4415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4417 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4420 /* the is_null_bb target simply copies the input register to the output */
4421 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4423 } else if (mono_class_is_nullable (klass)) {
4424 g_assert (!context_used);
4425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4426 /* the is_null_bb target simply copies the input register to the output */
4427 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class (non-AOT, non-shared): exact pointer comparison. */
4429 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4430 g_assert (!context_used);
4431 /* the remoting code is broken, access the class for now */
4432 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4433 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4435 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4436 cfg->exception_ptr = klass;
4439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk, rgctx-aware in shared code. */
4447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4448 /* the is_null_bb target simply copies the input register to the output */
4449 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: the result is NULL. */
4454 MONO_START_BB (cfg, false_bb);
4456 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4457 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4459 MONO_START_BB (cfg, is_null_bb);
4461 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant; see the comment below for
 * the 0/1/2 result encoding. With remoting enabled, transparent proxies need
 * extra checks against their custom type info and remote class.
 */
4467 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4469 /* This opcode takes as input an object reference and a class, and returns:
4470 0) if the object is an instance of the class,
4471 1) if the object is not instance of the class,
4472 2) if the object is a proxy whose type cannot be determined */
4475 #ifndef DISABLE_REMOTING
4476 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4478 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4480 int obj_reg = src->dreg;
4481 int dreg = alloc_ireg (cfg);
4483 #ifndef DISABLE_REMOTING
4484 int klass_reg = alloc_preg (cfg);
4487 NEW_BBLOCK (cfg, true_bb);
4488 NEW_BBLOCK (cfg, false_bb);
4489 NEW_BBLOCK (cfg, end_bb);
4490 #ifndef DISABLE_REMOTING
4491 NEW_BBLOCK (cfg, false2_bb);
4492 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is treated as "not an instance" (result 1). */
4495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4498 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4499 #ifndef DISABLE_REMOTING
4500 NEW_BBLOCK (cfg, interface_fail_bb);
4503 tmp_reg = alloc_preg (cfg);
4504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4505 #ifndef DISABLE_REMOTING
/* Interface check failed: if the object is a transparent proxy with custom
 * type info, its type cannot be determined here (result 2). */
4506 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4507 MONO_START_BB (cfg, interface_fail_bb);
4508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4510 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4512 tmp_reg = alloc_preg (cfg);
4513 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4517 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case with remoting: proxies are tested against their
 * remote proxy_class; proxies with custom type info yield result 2. */
4520 #ifndef DISABLE_REMOTING
4521 tmp_reg = alloc_preg (cfg);
4522 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4523 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4525 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4526 tmp_reg = alloc_preg (cfg);
4527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4530 tmp_reg = alloc_preg (cfg);
4531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4533 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4535 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4538 MONO_START_BB (cfg, no_proxy_bb);
4540 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4542 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
4546 MONO_START_BB (cfg, false_bb);
4548 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4551 #ifndef DISABLE_REMOTING
4552 MONO_START_BB (cfg, false2_bb);
4554 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4558 MONO_START_BB (cfg, true_bb);
4560 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4562 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction on the stack. */
4565 MONO_INST_NEW (cfg, ins, OP_ICONST);
4567 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for a castclass that must also handle transparent proxies
 *   (remoting).  Writes a small result code into DREG and funnels all
 *   paths into end_bb/ok_result_bb.
 *   NOTE(review): this excerpt is non-contiguous (embedded original
 *   line numbers skip), so some statements/braces are elided here.
 */
4573 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4575 /* This opcode takes as input an object reference and a class, and returns:
4576 0) if the object is an instance of the class,
4577 1) if the object is a proxy whose type cannot be determined
4578 an InvalidCastException exception is thrown otherwise*/
4581 #ifndef DISABLE_REMOTING
4582 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4584 MonoBasicBlock *ok_result_bb;
4586 int obj_reg = src->dreg;
4587 int dreg = alloc_ireg (cfg);
4588 int tmp_reg = alloc_preg (cfg);
4590 #ifndef DISABLE_REMOTING
4591 int klass_reg = alloc_preg (cfg);
4592 NEW_BBLOCK (cfg, end_bb);
4595 NEW_BBLOCK (cfg, ok_result_bb);
/* A null object always passes the cast: branch straight to the OK result. */
4597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4598 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4600 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4602 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4603 #ifndef DISABLE_REMOTING
4604 NEW_BBLOCK (cfg, interface_fail_bb);
4606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4607 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4608 MONO_START_BB (cfg, interface_fail_bb);
4609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* On the interface-cast failure path, only a transparent proxy may survive. */
4611 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4613 tmp_reg = alloc_preg (cfg);
4614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info cannot be verified -> InvalidCastException. */
4615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4616 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4618 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4622 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4626 #ifndef DISABLE_REMOTING
4627 NEW_BBLOCK (cfg, no_proxy_bb);
4629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4631 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: test against the remote (proxied) class instead. */
4633 tmp_reg = alloc_preg (cfg);
4634 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4637 tmp_reg = alloc_preg (cfg);
4638 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4642 NEW_BBLOCK (cfg, fail_1_bb);
4644 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4646 MONO_START_BB (cfg, fail_1_bb);
4648 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4651 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check (throws on failure). */
4653 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4655 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4659 MONO_START_BB (cfg, ok_result_bb);
4661 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4663 #ifndef DISABLE_REMOTING
4664 MONO_START_BB (cfg, end_bb);
4668 MONO_INST_NEW (cfg, ins, OP_ICONST);
4670 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *   Inline Enum.HasFlag: load the enum value, AND it with the flag,
 *   and compare the result back against the flag; result is an I4
 *   boolean in CEQ.  is_i4 selects 32- vs 64-bit opcodes based on the
 *   enum's underlying type (switch body elided in this excerpt).
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
4675 static G_GNUC_UNUSED MonoInst*
4676 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4678 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4679 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4680 gboolean is_i4 = TRUE;
4682 switch (enum_type->type) {
4685 #if SIZEOF_REGISTER == 8
4694 MonoInst *load, *and, *cmp, *ceq;
4695 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4696 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4697 int dest_reg = alloc_ireg (cfg);
/* (value & flag) == flag */
4699 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4700 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4701 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4702 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4704 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4707 load = mono_decompose_opcode (cfg, load);
4708 and = mono_decompose_opcode (cfg, and);
4709 cmp = mono_decompose_opcode (cfg, cmp);
4710 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *   Inline the work of mono_delegate_ctor: allocate the delegate
 *   object, store its target/method fields (with write barrier when
 *   enabled), set up a per-domain method-code slot, and install the
 *   delegate trampoline into invoke_impl/method_ptr.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
4718 * Returns NULL and set the cfg exception on error.
4720 static G_GNUC_UNUSED MonoInst*
4721 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4725 gpointer trampoline;
4726 MonoInst *obj, *method_ins, *tramp_ins;
4730 // FIXME reenable optimisation for virtual case
4735 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4738 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4742 obj = handle_alloc (cfg, klass, FALSE, 0);
4746 /* Inline the contents of mono_delegate_ctor */
4748 /* Set target field */
4749 /* Optimize away setting of NULL target */
4750 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4752 if (cfg->gen_write_barriers) {
4753 dreg = alloc_preg (cfg);
4754 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4755 emit_write_barrier (cfg, ptr, target);
4759 /* Set method field */
4760 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4761 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4764 * To avoid looking up the compiled code belonging to the target method
4765 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4766 * store it, and we fill it after the method has been compiled.
4768 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4769 MonoInst *code_slot_ins;
4772 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
4774 domain = mono_domain_get ();
4775 mono_domain_lock (domain);
4776 if (!domain_jit_info (domain)->method_code_hash)
4777 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4778 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4780 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4781 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4783 mono_domain_unlock (domain);
4785 if (cfg->compile_aot)
4786 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4788 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable trampoline constant; JIT: create the trampoline now. */
4793 if (cfg->compile_aot) {
4794 MonoDelegateClassMethodPair *del_tramp;
4796 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4797 del_tramp->klass = klass;
4798 del_tramp->method = context_used ? NULL : method;
4799 del_tramp->virtual = virtual;
4800 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4803 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4805 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4806 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4809 /* Set invoke_impl field */
4811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4813 dreg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4815 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4817 dreg = alloc_preg (cfg);
4818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4819 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4822 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a native call to the vararg mono_array_new_va icall for
 *   multi-dimensional array allocation of the given RANK.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
4828 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4830 MonoJitICallInfo *info;
4832 /* Need to register the icall so it gets an icall wrapper */
4833 info = mono_get_array_new_va_icall (rank);
4835 cfg->flags |= MONO_CFG_HAS_VARARGS;
4837 /* mono_array_new_va () needs a vararg calling convention */
4838 cfg->disable_llvm = TRUE;
4840 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4841 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4845 * handle_constrained_gsharedvt_call:
4847 * Handle constrained calls where the receiver is a gsharedvt type.
4848 * Return the instruction representing the call. Set the cfg exception on failure.
/* NOTE(review): non-contiguous excerpt; some lines are elided. */
4851 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_call,
4852 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4854 MonoInst *ins = NULL;
4855 MonoBasicBlock *bblock = *ref_bblock;
4856 gboolean emit_widen = *ref_emit_widen;
4859 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4860 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4861 * pack the arguments into an array, and do the rest of the work in an icall.
4863 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4864 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4865 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4866 MonoInst *args [16];
4869 * This case handles calls to
4870 * - object:ToString()/Equals()/GetHashCode(),
4871 * - System.IComparable<T>:CompareTo()
4872 * - System.IEquatable<T>:Equals ()
4873 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method (via rgctx when it has an open context). */
4877 if (mono_method_check_context_used (cmethod))
4878 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4880 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4881 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
4883 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4884 if (fsig->hasthis && fsig->param_count) {
4885 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4886 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4887 ins->dreg = alloc_preg (cfg);
4888 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4889 MONO_ADD_INS (cfg->cbb, ins);
4892 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4895 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* gsharedvt argument: pass its address through the localloc-ed array. */
4897 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4898 addr_reg = ins->dreg;
4899 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4901 EMIT_NEW_ICONST (cfg, args [3], 0);
4902 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4905 EMIT_NEW_ICONST (cfg, args [3], 0);
4906 EMIT_NEW_ICONST (cfg, args [4], 0);
4908 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/deref it according to the return type. */
4911 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4912 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4913 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
4917 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4918 MONO_ADD_INS (cfg->cbb, add);
4920 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4921 MONO_ADD_INS (cfg->cbb, ins);
4922 /* ins represents the call result */
4925 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4928 *ref_emit_widen = emit_widen;
4929 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *   Materialize the GOT address into cfg->got_var at the start of the
 *   entry bblock (once per method), plus a dummy use in the exit
 *   bblock to keep the variable alive for the whole method.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
4938 mono_emit_load_got_addr (MonoCompile *cfg)
4940 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already set up. */
4942 if (!cfg->got_var || cfg->got_var_allocated)
4945 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4946 getaddr->cil_code = cfg->header->code;
4947 getaddr->dreg = cfg->got_var->dreg;
4949 /* Add it to the start of the first bblock */
4950 if (cfg->bb_entry->code) {
4951 getaddr->next = cfg->bb_entry->code;
4952 cfg->bb_entry->code = getaddr;
4955 MONO_ADD_INS (cfg->bb_entry, getaddr);
4957 cfg->got_var_allocated = TRUE;
4960 * Add a dummy use to keep the got_var alive, since real uses might
4961 * only be generated by the back ends.
4962 * Add it to end_bblock, so the variable's lifetime covers the whole
4964 * It would be better to make the usage of the got var explicit in all
4965 * cases when the backend needs it (i.e. calls, throw etc.), so this
4966 * wouldn't be needed.
4968 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4969 MONO_ADD_INS (cfg->bb_exit, dummy_use);
4972 static int inline_limit;
4973 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being
 *   compiled: checks compile flags, inline depth, method attributes,
 *   body size (MONO_INLINELIMIT env override), class-initialization
 *   constraints, declarative security and soft-float restrictions.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
4976 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4978 MonoMethodHeaderSummary header;
4980 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4981 MonoMethodSignature *sig = mono_method_signature (method);
4985 if (cfg->disable_inline)
4987 if (cfg->generic_sharing_context)
/* Bound recursion through nested inlining. */
4990 if (cfg->inline_depth > 10)
4993 #ifdef MONO_ARCH_HAVE_LMF_OPS
4994 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4995 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4996 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5001 if (!mono_method_get_header_summary (method, &header))
5004 /*runtime, icall and pinvoke are checked by summary call*/
5005 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5006 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5007 (mono_class_is_marshalbyref (method->klass)) ||
5011 /* also consider num_locals? */
5012 /* Do the size check early to avoid creating vtables */
5013 if (!inline_limit_inited) {
5014 if (g_getenv ("MONO_INLINELIMIT"))
5015 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5017 inline_limit = INLINE_LENGTH_LIMIT;
5018 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5020 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5024 * if we can initialize the class of the method right away, we do,
5025 * otherwise we don't allow inlining if the class needs initialization,
5026 * since it would mean inserting a call to mono_runtime_class_init()
5027 * inside the inlined code
5029 if (!(cfg->opt & MONO_OPT_SHARED)) {
5030 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5031 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5032 vtable = mono_class_vtable (cfg->domain, method->klass);
5035 if (!cfg->compile_aot)
5036 mono_runtime_class_init (vtable);
5037 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5038 if (cfg->run_cctors && method->klass->has_cctor) {
5039 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5040 if (!method->klass->runtime_info)
5041 /* No vtable created yet */
5043 vtable = mono_class_vtable (cfg->domain, method->klass);
5046 /* This makes so that inline cannot trigger */
5047 /* .cctors: too many apps depend on them */
5048 /* running with a specific order... */
5049 if (! vtable->initialized)
5051 mono_runtime_class_init (vtable);
5053 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5054 if (!method->klass->runtime_info)
5055 /* No vtable created yet */
5057 vtable = mono_class_vtable (cfg->domain, method->klass);
5060 if (!vtable->initialized)
5065 * If we're compiling for shared code
5066 * the cctor will need to be run at aot method load time, for example,
5067 * or at the end of the compilation of the inlining method.
5069 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5074 * CAS - do not inline methods with declarative security
5075 * Note: this has to be before any possible return TRUE;
5077 if (mono_security_method_has_declsec (method))
5080 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: reject any R4 in the signature, it needs the fallback path. */
5081 if (mono_arch_is_soft_float ()) {
5083 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5085 for (i = 0; i < sig->param_count; ++i)
5086 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5091 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access requires an explicit class
 *   initialization check for KLASS at this point in METHOD.
 *   NOTE(review): non-contiguous excerpt; return statements are elided.
 */
5098 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5100 if (!cfg->compile_aot) {
/* JIT mode: an already-initialized vtable needs no further check. */
5102 if (vtable->initialized)
5106 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5107 if (cfg->method == method)
5111 if (!mono_class_needs_cctor_run (klass, method))
5114 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5115 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit the address of ARR [INDEX] for a one-dimensional array:
 *   optional bounds check (BCHECK), then base + index * element_size +
 *   offsetof (MonoArray, vector).  Uses an x86/amd64 LEA fast path for
 *   power-of-two element sizes, and an rgctx lookup of the element
 *   size for gsharedvt element types.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5122 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5126 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5129 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5132 mono_class_init (klass);
5133 size = mono_class_array_element_size (klass);
5136 mult_reg = alloc_preg (cfg);
5137 array_reg = arr->dreg;
5138 index_reg = index->dreg;
5140 #if SIZEOF_REGISTER == 8
5141 /* The array reg is 64 bits but the index reg is only 32 */
5142 if (COMPILE_LLVM (cfg)) {
5144 index2_reg = index_reg;
5146 index2_reg = alloc_preg (cfg);
5147 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5150 if (index->type == STACK_I8) {
5151 index2_reg = alloc_preg (cfg);
5152 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5154 index2_reg = index_reg;
5159 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5161 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5162 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 [size] gives the LEA shift; entries for non-power-of-two sizes are -1 (unused). */
5163 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5165 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5166 ins->klass = mono_class_get_element_class (klass);
5167 ins->type = STACK_MP;
5173 add_reg = alloc_ireg_mp (cfg);
5176 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it via rgctx. */
5179 g_assert (cfg->generic_sharing_context);
5180 context_used = mini_class_check_context_used (cfg, klass);
5181 g_assert (context_used);
5182 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5183 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5187 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5188 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5189 ins->klass = mono_class_get_element_class (klass);
5190 ins->type = STACK_MP;
5191 MONO_ADD_INS (cfg->cbb, ins);
5196 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit the address of ARR [i, j] for a rank-2 array: per-dimension
 *   lower-bound adjustment and range check against the bounds array,
 *   then (i' * len2 + j') * element_size + offsetof (MonoArray, vector).
 *   Only compiled when the arch has real mul/div (depends on OP_LMUL).
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5198 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5200 int bounds_reg = alloc_preg (cfg);
5201 int add_reg = alloc_ireg_mp (cfg);
5202 int mult_reg = alloc_preg (cfg);
5203 int mult2_reg = alloc_preg (cfg);
5204 int low1_reg = alloc_preg (cfg);
5205 int low2_reg = alloc_preg (cfg);
5206 int high1_reg = alloc_preg (cfg);
5207 int high2_reg = alloc_preg (cfg);
5208 int realidx1_reg = alloc_preg (cfg);
5209 int realidx2_reg = alloc_preg (cfg);
5210 int sum_reg = alloc_preg (cfg);
5211 int index1, index2, tmpreg;
5215 mono_class_init (klass);
5216 size = mono_class_array_element_size (klass);
5218 index1 = index_ins1->dreg;
5219 index2 = index_ins2->dreg;
5221 #if SIZEOF_REGISTER == 8
5222 /* The array reg is 64 bits but the index reg is only 32 */
5223 if (COMPILE_LLVM (cfg)) {
5226 tmpreg = alloc_preg (cfg);
5227 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5229 tmpreg = alloc_preg (cfg);
5230 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5234 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5238 /* range checking */
5239 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5240 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, unsigned-compare against length. */
5242 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5243 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5244 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5245 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5246 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5247 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5248 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds entry follows the first in memory. */
5250 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5251 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5252 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5253 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5254 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5255 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5256 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten to a linear offset: ((realidx1 * len2) + realidx2) * size. */
5258 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5259 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5261 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5262 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5264 ins->type = STACK_MP;
5266 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation by array rank: rank 1 and
 *   (with intrinsics) rank 2 are emitted inline; higher ranks call the
 *   marshalled Address helper.  IS_SET excludes the value argument
 *   when computing the rank from the signature.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5273 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5277 MonoMethod *addr_method;
5280 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5283 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5285 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5286 /* emit_ldelema_2 depends on OP_LMUL */
5287 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5288 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/element size. */
5292 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5293 addr_method = mono_marshal_get_array_address (rank, element_size);
5294 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoint instructions. */
5299 static MonoBreakPolicy
5300 always_insert_breakpoint (MonoMethod *method)
5302 return MONO_BREAK_POLICY_ALWAYS;
5305 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5308 * mono_set_break_policy:
5309 * policy_callback: the new callback function
5311 * Allow embedders to decide whether to actually obey breakpoint instructions
5312 * (both break IL instructions and Debugger.Break () method calls), for example
5313 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5314 * untrusted or semi-trusted code.
5316 * @policy_callback will be called every time a break point instruction needs to
5317 * be inserted with the method argument being the method that calls Debugger.Break()
5318 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5319 * if it wants the breakpoint to not be effective in the given method.
5320 * #MONO_BREAK_POLICY_ALWAYS is the default.
5323 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
5325 if (policy_callback)
5326 break_policy_func = policy_callback;
5328 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Consult the installed break policy for METHOD.  (Name contains a
 *   historical typo -- "brekpoint" -- kept so existing callers link.)
 *   NOTE(review): non-contiguous excerpt; return statements are elided.
 */
5332 should_insert_brekpoint (MonoMethod *method) {
5333 switch (break_policy_func (method)) {
5334 case MONO_BREAK_POLICY_ALWAYS:
5336 case MONO_BREAK_POLICY_NEVER:
5338 case MONO_BREAK_POLICY_ON_DBG:
5339 g_warning ("mdb no longer supported");
5342 g_warning ("Incorrect value returned from break policy callback");
5347 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the element copy between args [2] and the array slot
 *   args [0][args [1]]; IS_SET selects store-into-array vs load-from-array.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5349 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5351 MonoInst *addr, *store, *load;
5352 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5354 /* the bounds check is already done by the callers */
5355 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5357 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5358 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array needs a GC write barrier. */
5359 if (mini_type_is_reference (cfg, fsig->params [2]))
5360 emit_write_barrier (cfg, addr, load);
5362 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5363 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True if KLASS is (or instantiates to) a reference type under the current generic sharing. */
5370 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5372 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit sp [0][sp [1]] = sp [2].  Reference elements with
 *   SAFETY_CHECKS go through the virtual stelemref helper (covariance
 *   check); otherwise the address is computed inline, with a constant-
 *   index fast path and a gsharedvt variable-size path.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5376 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference needs the covariance-checking helper. */
5378 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5379 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5380 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5381 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5382 MonoInst *iargs [3];
5385 mono_class_setup_vtable (obj_array);
5386 g_assert (helper->slot);
5388 if (sp [0]->type != STACK_OBJ)
5390 if (sp [2]->type != STACK_OBJ)
5397 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5401 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5404 // FIXME-VT: OP_ICONST optimization
5405 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5406 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5407 ins->opcode = OP_STOREV_MEMBASE;
5408 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the offset at compile time. */
5409 int array_reg = sp [0]->dreg;
5410 int index_reg = sp [1]->dreg;
5411 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5414 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5415 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5417 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5418 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5419 if (generic_class_is_reference_type (cfg, klass))
5420 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline Array.UnsafeStore/UnsafeLoad: store goes through
 *   emit_array_store without safety checks; load computes the element
 *   address without a bounds check and loads from it.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5427 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5432 eklass = mono_class_from_mono_type (fsig->params [2]);
5434 eklass = mono_class_from_mono_type (fsig->ret);
5437 return emit_array_store (cfg, eklass, args, FALSE);
5439 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5446 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5450 //Only allow for valuetypes
5451 if (!param_klass->valuetype || !return_klass->valuetype)
5455 if (param_klass->has_references || return_klass->has_references)
5458 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5459 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5460 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5463 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5464 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5467 //And have the same size
5468 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Inline Array.UnsafeMov when source and destination types are
 *   bitwise compatible (directly, or as rank-1 arrays of compatible
 *   element types).
 *   NOTE(review): non-contiguous excerpt; the emitted move and the
 *   fallback return are elided here.
 */
5474 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5476 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5477 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5479 //Valuetypes that are semantically equivalent
5480 if (is_unsafe_mov_compatible (param_klass, return_klass))
5483 //Arrays of valuetypes that are semantically equivalent
5484 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with an intrinsic: SIMD
 *   intrinsics (when MONO_OPT_SIMD is enabled and the arch supports
 *   them), otherwise native-types intrinsics.
 *   NOTE(review): non-contiguous excerpt; some lines are elided.
 */
5491 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5493 #ifdef MONO_ARCH_SIMD_INTRINSICS
5494 MonoInst *ins = NULL;
5496 if (cfg->opt & MONO_OPT_SIMD) {
5497 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5503 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER of the given memory-model KIND to the
 *   current bblock.
 */
5507 emit_memory_barrier (MonoCompile *cfg, int kind)
5509 MonoInst *ins = NULL;
5510 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5511 MONO_ADD_INS (cfg->cbb, ins);
5512 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   LLVM-backend intrinsics for System.Math: unary Sin/Cos/Sqrt/Abs
 *   become a single R8 instruction; Min/Max on I4/U4/I8/U8 become
 *   CMOV-style IMIN/IMAX/LMIN/LMAX ops when MONO_OPT_CMOV is enabled.
 *   NOTE(review): non-contiguous excerpt; the opcode assignments for
 *   some branches are elided.
 */
5518 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5520 MonoInst *ins = NULL;
5523 /* The LLVM backend supports these intrinsics */
5524 if (cmethod->klass == mono_defaults.math_class) {
5525 if (strcmp (cmethod->name, "Sin") == 0) {
5527 } else if (strcmp (cmethod->name, "Cos") == 0) {
5529 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5531 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5535 if (opcode && fsig->param_count == 1) {
5536 MONO_INST_NEW (cfg, ins, opcode);
5537 ins->type = STACK_R8;
5538 ins->dreg = mono_alloc_freg (cfg);
5539 ins->sreg1 = args [0]->dreg;
5540 MONO_ADD_INS (cfg->cbb, ins);
5544 if (cfg->opt & MONO_OPT_CMOV) {
5545 if (strcmp (cmethod->name, "Min") == 0) {
5546 if (fsig->params [0]->type == MONO_TYPE_I4)
5548 if (fsig->params [0]->type == MONO_TYPE_U4)
5549 opcode = OP_IMIN_UN;
5550 else if (fsig->params [0]->type == MONO_TYPE_I8)
5552 else if (fsig->params [0]->type == MONO_TYPE_U8)
5553 opcode = OP_LMIN_UN;
5554 } else if (strcmp (cmethod->name, "Max") == 0) {
5555 if (fsig->params [0]->type == MONO_TYPE_I4)
5557 if (fsig->params [0]->type == MONO_TYPE_U4)
5558 opcode = OP_IMAX_UN;
5559 else if (fsig->params [0]->type == MONO_TYPE_I8)
5561 else if (fsig->params [0]->type == MONO_TYPE_U8)
5562 opcode = OP_LMAX_UN;
5566 if (opcode && fsig->param_count == 2) {
/* Result width follows the first parameter's type. */
5567 MONO_INST_NEW (cfg, ins, opcode);
5568 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5569 ins->dreg = mono_alloc_ireg (cfg);
5570 ins->sreg1 = args [0]->dreg;
5571 ins->sreg2 = args [1]->dreg;
5572 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit even in shared (generic-sharing) code:
 * the System.Array Unsafe{Store,Load,Mov} internal helpers. Returns the
 * result MonoInst or NULL when CMETHOD is none of these.
 */
5580 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5582 if (cmethod->klass == mono_defaults.array_class) {
5583 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5584 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5585 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5586 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5587 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5588 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   The main JIT intrinsics dispatcher: try to replace a call to CMETHOD with
 * an inline instruction sequence. Dispatches on the declaring class
 * (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 * Volatile, Debugger, Environment, Math, ObjC Selector) and falls back to
 * SIMD / native-types / LLVM / architecture-specific intrinsics. Returns the
 * MonoInst holding the call's result, or NULL to emit a normal call.
 * NOTE(review): this view of the file is elided — many interior lines
 * (#else/#endif pairs, break statements, opcode assignments) are not shown;
 * comments below describe only the visible structure.
 */
5595 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5597 MonoInst *ins = NULL;
/* Lazily cache System.Runtime.CompilerServices.RuntimeHelpers. */
5599 static MonoClass *runtime_helpers_class = NULL;
5600 if (! runtime_helpers_class)
5601 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5602 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String: get_Chars, get_Length, InternalSetChar --- */
5604 if (cmethod->klass == mono_defaults.string_class) {
5605 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5606 int dreg = alloc_ireg (cfg);
5607 int index_reg = alloc_preg (cfg);
5608 int mult_reg = alloc_preg (cfg);
5609 int add_reg = alloc_preg (cfg);
5611 #if SIZEOF_REGISTER == 8
5612 /* The array reg is 64 bits but the index reg is only 32 */
5613 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5615 index_reg = args [1]->dreg;
5617 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5619 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 can fold base + index*2 + offset into one LEA. */
5620 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5621 add_reg = ins->dreg;
5622 /* Avoid a warning */
5624 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: compute chars + index*2 explicitly, then load a UTF-16 unit. */
5627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5628 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5629 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5630 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5632 type_from_op (cfg, ins, NULL, NULL);
5634 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5635 int dreg = alloc_ireg (cfg);
5636 /* Decompose later to allow more optimizations */
5637 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5638 ins->type = STACK_I4;
5639 ins->flags |= MONO_INST_FAULT;
5640 cfg->cbb->has_array_access = TRUE;
5641 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5644 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5645 int mult_reg = alloc_preg (cfg);
5646 int add_reg = alloc_preg (cfg);
5648 /* The corlib functions check for oob already. */
5649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5650 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5652 return cfg->cbb->last_ins;
/* --- System.Object: GetType, InternalGetHashCode, empty .ctor --- */
5655 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load obj->vtable, then vtable->type (faulting on null obj). */
5657 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5658 int dreg = alloc_ireg_ref (cfg);
5659 int vt_reg = alloc_preg (cfg);
5660 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5661 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5662 type_from_op (cfg, ins, NULL, NULL);
5665 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash from the object address; only valid with a non-moving GC. */
5666 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5667 int dreg = alloc_ireg (cfg);
5668 int t1 = alloc_ireg (cfg);
5670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5671 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5672 ins->type = STACK_I4;
/* Object..ctor() does nothing: emit a NOP instead of a call. */
5676 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5677 MONO_INST_NEW (cfg, ins, OP_NOP);
5678 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array: generic accessors, GetLength/GetLowerBound(0), rank, length --- */
5682 } else if (cmethod->klass == mono_defaults.array_class) {
5683 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5684 return emit_array_generic_access (cfg, fsig, args, FALSE);
5685 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5686 return emit_array_generic_access (cfg, fsig, args, TRUE);
5688 #ifndef MONO_BIG_ARRAYS
5690 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
/* Only for a constant dimension argument of 0. */
5693 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5694 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5695 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5696 int dreg = alloc_ireg (cfg);
5697 int bounds_reg = alloc_ireg_mp (cfg);
5698 MonoBasicBlock *end_bb, *szarray_bb;
5699 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5701 NEW_BBLOCK (cfg, end_bb);
5702 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on whether the array has a bounds descriptor (multi-dim) or not. */
5704 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5705 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5707 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5708 /* Non-szarray case */
5710 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5711 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5713 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5714 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is always 0. */
5716 MONO_START_BB (cfg, szarray_bb);
5719 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5720 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5722 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5723 MONO_START_BB (cfg, end_bb);
5725 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5726 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with 'g' (getters). */
5732 if (cmethod->name [0] != 'g')
/* get_Rank: read the rank byte out of the vtable. */
5735 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5736 int dreg = alloc_ireg (cfg);
5737 int vtable_reg = alloc_preg (cfg);
5738 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5739 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5740 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5741 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5742 type_from_op (cfg, ins, NULL, NULL);
5745 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5746 int dreg = alloc_ireg (cfg);
5748 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5749 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5750 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers: OffsetToStringData is a compile-time constant --- */
5755 } else if (cmethod->klass == runtime_helpers_class) {
5757 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5758 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread: SpinWait_nop, MemoryBarrier, Volatile{Read,Write} --- */
5762 } else if (cmethod->klass == mono_defaults.thread_class) {
5763 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5764 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5765 MONO_ADD_INS (cfg->cbb, ins);
5767 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5768 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* VolatileRead: plain load of the pointed-to value followed by an acquire barrier. */
5769 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5771 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5773 if (fsig->params [0]->type == MONO_TYPE_I1)
5774 opcode = OP_LOADI1_MEMBASE;
5775 else if (fsig->params [0]->type == MONO_TYPE_U1)
5776 opcode = OP_LOADU1_MEMBASE;
5777 else if (fsig->params [0]->type == MONO_TYPE_I2)
5778 opcode = OP_LOADI2_MEMBASE;
5779 else if (fsig->params [0]->type == MONO_TYPE_U2)
5780 opcode = OP_LOADU2_MEMBASE;
5781 else if (fsig->params [0]->type == MONO_TYPE_I4)
5782 opcode = OP_LOADI4_MEMBASE;
5783 else if (fsig->params [0]->type == MONO_TYPE_U4)
5784 opcode = OP_LOADU4_MEMBASE;
5785 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5786 opcode = OP_LOADI8_MEMBASE;
5787 else if (fsig->params [0]->type == MONO_TYPE_R4)
5788 opcode = OP_LOADR4_MEMBASE;
5789 else if (fsig->params [0]->type == MONO_TYPE_R8)
5790 opcode = OP_LOADR8_MEMBASE;
5791 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5792 opcode = OP_LOAD_MEMBASE;
5795 MONO_INST_NEW (cfg, ins, opcode);
5796 ins->inst_basereg = args [0]->dreg;
5797 ins->inst_offset = 0;
5798 MONO_ADD_INS (cfg->cbb, ins);
/* Pick destination register class and stack type from the parameter type. */
5800 switch (fsig->params [0]->type) {
5807 ins->dreg = mono_alloc_ireg (cfg);
5808 ins->type = STACK_I4;
5812 ins->dreg = mono_alloc_lreg (cfg);
5813 ins->type = STACK_I8;
5817 ins->dreg = mono_alloc_ireg (cfg);
5818 #if SIZEOF_REGISTER == 8
5819 ins->type = STACK_I8;
5821 ins->type = STACK_I4;
5826 ins->dreg = mono_alloc_freg (cfg);
5827 ins->type = STACK_R8;
5830 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5831 ins->dreg = mono_alloc_ireg_ref (cfg);
5832 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets. */
5836 if (opcode == OP_LOADI8_MEMBASE)
5837 ins = mono_decompose_opcode (cfg, ins);
5839 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* VolatileWrite: release barrier, then plain store of the value. */
5843 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5845 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5847 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5848 opcode = OP_STOREI1_MEMBASE_REG;
5849 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5850 opcode = OP_STOREI2_MEMBASE_REG;
5851 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5852 opcode = OP_STOREI4_MEMBASE_REG;
5853 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5854 opcode = OP_STOREI8_MEMBASE_REG;
5855 else if (fsig->params [0]->type == MONO_TYPE_R4)
5856 opcode = OP_STORER4_MEMBASE_REG;
5857 else if (fsig->params [0]->type == MONO_TYPE_R8)
5858 opcode = OP_STORER8_MEMBASE_REG;
5859 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5860 opcode = OP_STORE_MEMBASE_REG;
5863 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5865 MONO_INST_NEW (cfg, ins, opcode);
5866 ins->sreg1 = args [1]->dreg;
5867 ins->inst_destbasereg = args [0]->dreg;
5868 ins->inst_offset = 0;
5869 MONO_ADD_INS (cfg->cbb, ins);
5871 if (opcode == OP_STOREI8_MEMBASE_REG)
5872 ins = mono_decompose_opcode (cfg, ins);
/* --- System.Threading.Monitor: Enter/Exit via fastpath trampolines (arch-dependent) --- */
5877 } else if (cmethod->klass == mono_defaults.monitor_class) {
5878 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5879 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5882 if (COMPILE_LLVM (cfg)) {
5884 * Pass the argument normally, the LLVM backend will handle the
5885 * calling convention problems.
5887 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM path: pass the object in a fixed architecture register. */
5889 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5890 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5891 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5892 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5895 return (MonoInst*)call;
5896 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(obj, ref lockTaken) — the v4 two-argument overload. */
5897 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5900 if (COMPILE_LLVM (cfg)) {
5902 * Pass the argument normally, the LLVM backend will handle the
5903 * calling convention problems.
5905 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5907 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5908 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5909 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5910 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5913 return (MonoInst*)call;
5915 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5918 if (COMPILE_LLVM (cfg)) {
5919 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5921 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5922 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5923 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5924 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5927 return (MonoInst*)call;
/* --- System.Threading.Interlocked: Read, Inc/Dec, Add, Exchange, CompareExchange --- */
5930 } else if (cmethod->klass->image == mono_defaults.corlib &&
5931 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5932 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5935 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): atomic load if supported, else barriered plain load. */
5936 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5937 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5938 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5939 ins->dreg = mono_alloc_preg (cfg);
5940 ins->sreg1 = args [0]->dreg;
5941 ins->type = STACK_I8;
5942 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5943 MONO_ADD_INS (cfg->cbb, ins);
5947 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5949 /* 64 bit reads are already atomic */
5950 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5951 load_ins->dreg = mono_alloc_preg (cfg);
5952 load_ins->inst_basereg = args [0]->dreg;
5953 load_ins->inst_offset = 0;
5954 load_ins->type = STACK_I8;
5955 MONO_ADD_INS (cfg->cbb, load_ins);
5957 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of the constant 1. */
5964 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5965 MonoInst *ins_iconst;
5968 if (fsig->params [0]->type == MONO_TYPE_I4) {
5969 opcode = OP_ATOMIC_ADD_I4;
5970 cfg->has_atomic_add_i4 = TRUE;
5972 #if SIZEOF_REGISTER == 8
5973 else if (fsig->params [0]->type == MONO_TYPE_I8)
5974 opcode = OP_ATOMIC_ADD_I8;
5977 if (!mono_arch_opcode_supported (opcode))
5979 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5980 ins_iconst->inst_c0 = 1;
5981 ins_iconst->dreg = mono_alloc_ireg (cfg);
5982 MONO_ADD_INS (cfg->cbb, ins_iconst);
5984 MONO_INST_NEW (cfg, ins, opcode);
5985 ins->dreg = mono_alloc_ireg (cfg);
5986 ins->inst_basereg = args [0]->dreg;
5987 ins->inst_offset = 0;
5988 ins->sreg2 = ins_iconst->dreg;
5989 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5990 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1. */
5992 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5993 MonoInst *ins_iconst;
5996 if (fsig->params [0]->type == MONO_TYPE_I4) {
5997 opcode = OP_ATOMIC_ADD_I4;
5998 cfg->has_atomic_add_i4 = TRUE;
6000 #if SIZEOF_REGISTER == 8
6001 else if (fsig->params [0]->type == MONO_TYPE_I8)
6002 opcode = OP_ATOMIC_ADD_I8;
6005 if (!mono_arch_opcode_supported (opcode))
6007 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6008 ins_iconst->inst_c0 = -1;
6009 ins_iconst->dreg = mono_alloc_ireg (cfg);
6010 MONO_ADD_INS (cfg->cbb, ins_iconst);
6012 MONO_INST_NEW (cfg, ins, opcode);
6013 ins->dreg = mono_alloc_ireg (cfg);
6014 ins->inst_basereg = args [0]->dreg;
6015 ins->inst_offset = 0;
6016 ins->sreg2 = ins_iconst->dreg;
6017 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6018 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied operand. */
6020 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6023 if (fsig->params [0]->type == MONO_TYPE_I4) {
6024 opcode = OP_ATOMIC_ADD_I4;
6025 cfg->has_atomic_add_i4 = TRUE;
6027 #if SIZEOF_REGISTER == 8
6028 else if (fsig->params [0]->type == MONO_TYPE_I8)
6029 opcode = OP_ATOMIC_ADD_I8;
6032 if (!mono_arch_opcode_supported (opcode))
6034 MONO_INST_NEW (cfg, ins, opcode);
6035 ins->dreg = mono_alloc_ireg (cfg);
6036 ins->inst_basereg = args [0]->dreg;
6037 ins->inst_offset = 0;
6038 ins->sreg2 = args [1]->dreg;
6039 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6040 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic swap; float variants go through int registers via moves. */
6043 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6044 MonoInst *f2i = NULL, *i2f;
6045 guint32 opcode, f2i_opcode, i2f_opcode;
6046 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6047 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6049 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6050 fsig->params [0]->type == MONO_TYPE_R4) {
6051 opcode = OP_ATOMIC_EXCHANGE_I4;
6052 f2i_opcode = OP_MOVE_F_TO_I4;
6053 i2f_opcode = OP_MOVE_I4_TO_F;
6054 cfg->has_atomic_exchange_i4 = TRUE;
6056 #if SIZEOF_REGISTER == 8
6058 fsig->params [0]->type == MONO_TYPE_I8 ||
6059 fsig->params [0]->type == MONO_TYPE_R8 ||
6060 fsig->params [0]->type == MONO_TYPE_I) {
6061 opcode = OP_ATOMIC_EXCHANGE_I8;
6062 f2i_opcode = OP_MOVE_F_TO_I8;
6063 i2f_opcode = OP_MOVE_I8_TO_F;
6066 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6067 opcode = OP_ATOMIC_EXCHANGE_I4;
6068 cfg->has_atomic_exchange_i4 = TRUE;
6074 if (!mono_arch_opcode_supported (opcode))
6078 /* TODO: Decompose these opcodes instead of bailing here. */
6079 if (COMPILE_SOFT_FLOAT (cfg))
/* Float value: move its bits into an integer register first. */
6082 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6083 f2i->dreg = mono_alloc_ireg (cfg);
6084 f2i->sreg1 = args [1]->dreg;
6085 if (f2i_opcode == OP_MOVE_F_TO_I4)
6086 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6087 MONO_ADD_INS (cfg->cbb, f2i);
6090 MONO_INST_NEW (cfg, ins, opcode);
6091 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6092 ins->inst_basereg = args [0]->dreg;
6093 ins->inst_offset = 0;
6094 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6095 MONO_ADD_INS (cfg->cbb, ins);
6097 switch (fsig->params [0]->type) {
6099 ins->type = STACK_I4;
6102 ins->type = STACK_I8;
6105 #if SIZEOF_REGISTER == 8
6106 ins->type = STACK_I8;
6108 ins->type = STACK_I4;
6113 ins->type = STACK_R8;
6116 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6117 ins->type = STACK_OBJ;
/* Float result: move the returned bits back into a float register. */
6122 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6123 i2f->dreg = mono_alloc_freg (cfg);
6124 i2f->sreg1 = ins->dreg;
6125 i2f->type = STACK_R8;
6126 if (i2f_opcode == OP_MOVE_I4_TO_F)
6127 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6128 MONO_ADD_INS (cfg->cbb, i2f);
/* Storing a reference: keep the write barrier for the GC. */
6133 if (cfg->gen_write_barriers && is_ref)
6134 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref T, T value, T comparand): atomic CAS, same f2i/i2f dance. */
6136 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6137 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6138 guint32 opcode, f2i_opcode, i2f_opcode;
6139 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6140 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6142 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6143 fsig->params [1]->type == MONO_TYPE_R4) {
6144 opcode = OP_ATOMIC_CAS_I4;
6145 f2i_opcode = OP_MOVE_F_TO_I4;
6146 i2f_opcode = OP_MOVE_I4_TO_F;
6147 cfg->has_atomic_cas_i4 = TRUE;
6149 #if SIZEOF_REGISTER == 8
6151 fsig->params [1]->type == MONO_TYPE_I8 ||
6152 fsig->params [1]->type == MONO_TYPE_R8 ||
6153 fsig->params [1]->type == MONO_TYPE_I) {
6154 opcode = OP_ATOMIC_CAS_I8;
6155 f2i_opcode = OP_MOVE_F_TO_I8;
6156 i2f_opcode = OP_MOVE_I8_TO_F;
6159 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6160 opcode = OP_ATOMIC_CAS_I4;
6161 cfg->has_atomic_cas_i4 = TRUE;
6167 if (!mono_arch_opcode_supported (opcode))
6171 /* TODO: Decompose these opcodes instead of bailing here. */
6172 if (COMPILE_SOFT_FLOAT (cfg))
/* Move both the new value and the comparand into integer registers. */
6175 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6176 f2i_new->dreg = mono_alloc_ireg (cfg);
6177 f2i_new->sreg1 = args [1]->dreg;
6178 if (f2i_opcode == OP_MOVE_F_TO_I4)
6179 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6180 MONO_ADD_INS (cfg->cbb, f2i_new);
6182 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6183 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6184 f2i_cmp->sreg1 = args [2]->dreg;
6185 if (f2i_opcode == OP_MOVE_F_TO_I4)
6186 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6187 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6190 MONO_INST_NEW (cfg, ins, opcode);
6191 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6192 ins->sreg1 = args [0]->dreg;
6193 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6194 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6195 MONO_ADD_INS (cfg->cbb, ins);
6197 switch (fsig->params [0]->type) {
6199 ins->type = STACK_I4;
6202 ins->type = STACK_I8;
6205 #if SIZEOF_REGISTER == 8
6206 ins->type = STACK_I8;
6208 ins->type = STACK_I4;
6213 ins->type = STACK_R8;
6216 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6217 ins->type = STACK_OBJ;
6222 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6223 i2f->dreg = mono_alloc_freg (cfg);
6224 i2f->sreg1 = ins->dreg;
6225 i2f->type = STACK_R8;
6226 if (i2f_opcode == OP_MOVE_I4_TO_F)
6227 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6228 MONO_ADD_INS (cfg->cbb, i2f);
6233 if (cfg->gen_write_barriers && is_ref)
6234 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange with a 4th 'out bool success' argument (int32 only). */
6236 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6237 fsig->params [1]->type == MONO_TYPE_I4) {
6238 MonoInst *cmp, *ceq;
6240 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6243 /* int32 r = CAS (location, value, comparand); */
6244 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6245 ins->dreg = alloc_ireg (cfg);
6246 ins->sreg1 = args [0]->dreg;
6247 ins->sreg2 = args [1]->dreg;
6248 ins->sreg3 = args [2]->dreg;
6249 ins->type = STACK_I4;
6250 MONO_ADD_INS (cfg->cbb, ins);
6252 /* bool result = r == comparand; */
6253 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6254 cmp->sreg1 = ins->dreg;
6255 cmp->sreg2 = args [2]->dreg;
6256 cmp->type = STACK_I4;
6257 MONO_ADD_INS (cfg->cbb, cmp);
6259 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6260 ceq->dreg = alloc_ireg (cfg);
6261 ceq->type = STACK_I4;
6262 MONO_ADD_INS (cfg->cbb, ceq);
6264 /* *success = result; */
6265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6267 cfg->has_atomic_cas_i4 = TRUE;
6269 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6270 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6274 } else if (cmethod->klass->image == mono_defaults.corlib &&
6275 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6276 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
/* Volatile.Read: type-specific atomic load with acquire semantics. */
6279 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6281 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6282 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6284 if (fsig->params [0]->type == MONO_TYPE_I1)
6285 opcode = OP_ATOMIC_LOAD_I1;
6286 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6287 opcode = OP_ATOMIC_LOAD_U1;
6288 else if (fsig->params [0]->type == MONO_TYPE_I2)
6289 opcode = OP_ATOMIC_LOAD_I2;
6290 else if (fsig->params [0]->type == MONO_TYPE_U2)
6291 opcode = OP_ATOMIC_LOAD_U2;
6292 else if (fsig->params [0]->type == MONO_TYPE_I4)
6293 opcode = OP_ATOMIC_LOAD_I4;
6294 else if (fsig->params [0]->type == MONO_TYPE_U4)
6295 opcode = OP_ATOMIC_LOAD_U4;
6296 else if (fsig->params [0]->type == MONO_TYPE_R4)
6297 opcode = OP_ATOMIC_LOAD_R4;
6298 else if (fsig->params [0]->type == MONO_TYPE_R8)
6299 opcode = OP_ATOMIC_LOAD_R8;
6300 #if SIZEOF_REGISTER == 8
6301 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6302 opcode = OP_ATOMIC_LOAD_I8;
6303 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6304 opcode = OP_ATOMIC_LOAD_U8;
6306 else if (fsig->params [0]->type == MONO_TYPE_I)
6307 opcode = OP_ATOMIC_LOAD_I4;
6308 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6309 opcode = OP_ATOMIC_LOAD_U4;
6313 if (!mono_arch_opcode_supported (opcode))
6316 MONO_INST_NEW (cfg, ins, opcode);
6317 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6318 ins->sreg1 = args [0]->dreg;
6319 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6320 MONO_ADD_INS (cfg->cbb, ins);
6322 switch (fsig->params [0]->type) {
6323 case MONO_TYPE_BOOLEAN:
6330 ins->type = STACK_I4;
6334 ins->type = STACK_I8;
6338 #if SIZEOF_REGISTER == 8
6339 ins->type = STACK_I8;
6341 ins->type = STACK_I4;
6346 ins->type = STACK_R8;
6349 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6350 ins->type = STACK_OBJ;
/* Volatile.Write: type-specific atomic store with release semantics. */
6356 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6358 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6360 if (fsig->params [0]->type == MONO_TYPE_I1)
6361 opcode = OP_ATOMIC_STORE_I1;
6362 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6363 opcode = OP_ATOMIC_STORE_U1;
6364 else if (fsig->params [0]->type == MONO_TYPE_I2)
6365 opcode = OP_ATOMIC_STORE_I2;
6366 else if (fsig->params [0]->type == MONO_TYPE_U2)
6367 opcode = OP_ATOMIC_STORE_U2;
6368 else if (fsig->params [0]->type == MONO_TYPE_I4)
6369 opcode = OP_ATOMIC_STORE_I4;
6370 else if (fsig->params [0]->type == MONO_TYPE_U4)
6371 opcode = OP_ATOMIC_STORE_U4;
6372 else if (fsig->params [0]->type == MONO_TYPE_R4)
6373 opcode = OP_ATOMIC_STORE_R4;
6374 else if (fsig->params [0]->type == MONO_TYPE_R8)
6375 opcode = OP_ATOMIC_STORE_R8;
6376 #if SIZEOF_REGISTER == 8
6377 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6378 opcode = OP_ATOMIC_STORE_I8;
6379 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6380 opcode = OP_ATOMIC_STORE_U8;
6382 else if (fsig->params [0]->type == MONO_TYPE_I)
6383 opcode = OP_ATOMIC_STORE_I4;
6384 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6385 opcode = OP_ATOMIC_STORE_U4;
6389 if (!mono_arch_opcode_supported (opcode))
6392 MONO_INST_NEW (cfg, ins, opcode);
6393 ins->dreg = args [0]->dreg;
6394 ins->sreg1 = args [1]->dreg;
6395 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6396 MONO_ADD_INS (cfg->cbb, ins);
6398 if (cfg->gen_write_barriers && is_ref)
6399 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break: icall when a debugger is attached, NOP otherwise --- */
6405 } else if (cmethod->klass->image == mono_defaults.corlib &&
6406 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6407 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6408 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* sic: the helper really is named "should_insert_brekpoint" elsewhere in this file. */
6409 if (should_insert_brekpoint (cfg->method)) {
6410 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6412 MONO_INST_NEW (cfg, ins, OP_NOP);
6413 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: compile-time constant --- */
6417 } else if (cmethod->klass->image == mono_defaults.corlib &&
6418 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6419 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6420 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6422 EMIT_NEW_ICONST (cfg, ins, 1);
6424 EMIT_NEW_ICONST (cfg, ins, 0);
6427 } else if (cmethod->klass == mono_defaults.math_class) {
6429 * There is general branchless code for Min/Max, but it does not work for
6431 * http://everything2.com/?node_id=1051618
/* --- MonoMac/monotouch ObjCRuntime.Selector: fold GetHandle("literal") to a selector const --- */
6433 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6434 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6435 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6436 !strcmp (cmethod->klass->name, "Selector")) {
6437 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* NOTE(review): klass->name was already required to be "Selector" above, so
 * comparing klass->name against "GetHandle" here can never match — this
 * branch looks dead; presumably cmethod->name was intended. TODO confirm
 * against upstream before changing (surrounding lines are elided here). */
6438 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6439 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6442 MonoJumpInfoToken *ji;
6445 cfg->disable_llvm = TRUE;
/* Recover the LDSTR patch info from either the GOT entry or the AOT const. */
6447 if (args [0]->opcode == OP_GOT_ENTRY) {
6448 pi = args [0]->inst_p1;
6449 g_assert (pi->opcode == OP_PATCH_INFO);
6450 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6453 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6454 ji = args [0]->inst_p0;
/* The string argument is folded away; kill its instruction. */
6457 NULLIFY_INS (args [0]);
6460 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6461 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6462 ins->dreg = mono_alloc_ireg (cfg);
6464 ins->inst_p0 = mono_string_to_utf8 (s);
6465 MONO_ADD_INS (cfg->cbb, ins);
/* --- Fallbacks: SIMD, native types, LLVM, then the architecture backend --- */
6471 #ifdef MONO_ARCH_SIMD_INTRINSICS
6472 if (cfg->opt & MONO_OPT_SIMD) {
6473 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6479 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6483 if (COMPILE_LLVM (cfg)) {
6484 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6489 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect selected method calls to alternative implementations. Currently
 * only String.InternalAllocateStr is redirected, to the GC's managed string
 * allocator, and only when allocation profiling and shared code are off.
 * Returns the emitted call instruction, or (in elided lines) presumably NULL
 * when no redirection applies — TODO confirm, tail of function not visible.
 */
6493 * This entry point could be used later for arbitrary method
6496 inline static MonoInst*
6497 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6498 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6500 if (method->klass == mono_defaults.string_class) {
6501 /* managed string allocation support */
6502 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6503 MonoInst *iargs [2];
6504 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6505 MonoMethod *managed_alloc = NULL;
6507 g_assert (vtable); /*Should not fail since it System.String*/
6508 #ifndef MONO_CROSS_COMPILE
6509 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6513 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6514 iargs [1] = args [0];
6515 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this' when sig->hasthis) and emit a store of the
 * corresponding stack value SP [i] into it, recording the originating IL
 * offset on the store. Populates cfg->args [i] as a side effect.
 */
6522 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6524 MonoInst *store, *temp;
6527 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' gets its type from the stack entry; real params come from the signature. */
6528 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6531 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6532 * would be different than the MonoInst's used to represent arguments, and
6533 * the ldelema implementation can't deal with that.
6534 * Solution: When ldelema is used on an inline argument, create a var for
6535 * it, emit ldelema on that var, and emit the saving code below in
6536 * inline_method () if needed.
6538 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6539 cfg->args [i] = temp;
6540 /* This uses cfg->args [i] which is set by the preceeding line */
6541 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6542 store->cil_code = sp [0]->cil_code;
6547 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6548 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict which callees may be inlined. Reads the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable once (cached in
 * a static) and returns TRUE only when the callee's full name starts with
 * that prefix. With an empty/unset limit the visible comparison is skipped
 * (the return for that path is in elided lines).
 */
6550 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6552 check_inline_called_method_name_limit (MonoMethod *called_method)
6555 static const char *limit = NULL;
/* Lazy one-time read of the environment variable. */
6557 if (limit == NULL) {
6558 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6560 if (limit_string != NULL)
6561 limit = limit_string;
6566 if (limit [0] != '\0') {
6567 char *called_method_name = mono_method_full_name (called_method, TRUE);
6569 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6570 g_free (called_method_name);
6572 //return (strncmp_result <= 0);
6573 return (strncmp_result == 0);
/*
 * check_inline_caller_method_name_limit:
 *
 *   Companion of check_inline_called_method_name_limit: restrict inlining by
 * the CALLER's full name using the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable (read once, cached). Returns TRUE only when the
 * caller's full name starts with the configured prefix.
 */
6580 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6582 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6585 static const char *limit = NULL;
/* Lazy one-time read of the environment variable. */
6587 if (limit == NULL) {
6588 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6589 if (limit_string != NULL) {
6590 limit = limit_string;
6596 if (limit [0] != '\0') {
6597 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6599 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6600 g_free (caller_method_name);
6602 //return (strncmp_result <= 0);
6603 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes virtual register DREG to the zero value
 *   appropriate for RTYPE: NULL for pointers/references, 0 for integral
 *   types, 0.0 for R4/R8 (loaded from static constants so the instruction
 *   can reference them by address), and VZERO for value types, including
 *   generic instances and type variables known to be value types.
 */
6611 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	/* Backing storage for the float/double zero constants referenced below. */
6613 	static double r8_0 = 0.0;
6614 	static float r4_0 = 0.0;
6618 	rtype = mini_replace_type (rtype);
6622 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6623 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6624 		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6625 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6626 		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6627 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* r4fp: R4 values are kept in single-precision registers. */
6628 		MONO_INST_NEW (cfg, ins, OP_R4CONST);
6629 		ins->type = STACK_R4;
6630 		ins->inst_p0 = (void*)&r4_0;
6632 		MONO_ADD_INS (cfg->cbb, ins);
6633 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6634 		MONO_INST_NEW (cfg, ins, OP_R8CONST);
6635 		ins->type = STACK_R8;
6636 		ins->inst_p0 = (void*)&r8_0;
6638 		MONO_ADD_INS (cfg->cbb, ins);
6639 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6640 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6641 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6642 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6643 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
		/* Fallback: reference/pointer-like types get NULL. */
6645 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder constants
 *   instead of real zero stores. These keep the SSA/IR well-formed without
 *   generating actual initialization code; unsupported types fall back to
 *   a real emit_init_rvar ().
 */
6650 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6654 	rtype = mini_replace_type (rtype);
6658 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6659 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6660 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6661 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6662 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6663 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
6664 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6665 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6666 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6667 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6668 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6669 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6670 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6671 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
		/* No dummy opcode for this type: emit a real zero init instead. */
6673 		emit_init_rvar (cfg, dreg, rtype);
6677 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE. Under soft-float the value is
 *   built in a temp register and stored via LOCSTORE; otherwise the local's
 *   dreg is initialized directly. When INIT is FALSE only dummy (no-op)
 *   initialization is emitted — see comment above.
 */
6679 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6681 	MonoInst *var = cfg->locals [local];
6682 	if (COMPILE_SOFT_FLOAT (cfg)) {
		/* Soft-float: materialize the zero in a fresh reg, then store it
		 * into the local so the FP decomposition passes see a store. */
6684 		int reg = alloc_dreg (cfg, var->type);
6685 		emit_init_rvar (cfg, reg, type);
6686 		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6689 		emit_init_rvar (cfg, var->dreg, type);
6691 		emit_dummy_init_rvar (cfg, var->dreg, type);
6698 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD at IP with arguments SP. Saves all relevant
 *   per-method compilation state from CFG, recursively invokes
 *   mono_method_to_ir () on the callee between fresh start/end bblocks,
 *   restores the state, and on success stitches the produced bblocks into
 *   the caller's graph (merging begin/end blocks where possible).
 *   Returns the inlining cost (> 0) on success, 0 on failure/abort.
 *   *OUT_CBB receives the bblock following the inlined body.
 */
6701 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6702 	       guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6704 	MonoInst *ins, *rvar = NULL;
6705 	MonoMethodHeader *cheader;
6706 	MonoBasicBlock *ebblock, *sbblock;
6708 	MonoMethod *prev_inlined_method;
6709 	MonoInst **prev_locals, **prev_args;
6710 	MonoType **prev_arg_types;
6711 	guint prev_real_offset;
6712 	GHashTable *prev_cbb_hash;
6713 	MonoBasicBlock **prev_cil_offset_to_bb;
6714 	MonoBasicBlock *prev_cbb;
6715 	unsigned char* prev_cil_start;
6716 	guint32 prev_cil_offset_to_bb_len;
6717 	MonoMethod *prev_current_method;
6718 	MonoGenericContext *prev_generic_context;
6719 	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6721 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* Debug-only name filters; see check_inline_*_method_name_limit (). */
6723 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6724 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6727 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6728 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6732 	if (cfg->verbose_level > 2)
6733 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6735 	if (!cmethod->inline_info) {
6736 		cfg->stat_inlineable_methods++;
6737 		cmethod->inline_info = 1;
6740 	/* allocate local variables */
6741 	cheader = mono_method_get_header (cmethod);
6743 	if (cheader == NULL || mono_loader_get_last_error ()) {
6744 		MonoLoaderError *error = mono_loader_get_last_error ();
6747 		mono_metadata_free_mh (cheader);
6748 		if (inline_always && error)
6749 			mono_cfg_set_exception (cfg, error->exception_type);
6751 		mono_loader_clear_error ();
6755 	/*Must verify before creating locals as it can cause the JIT to assert.*/
6756 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6757 		mono_metadata_free_mh (cheader);
6761 	/* allocate space to store the return value */
6762 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6763 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	/* Swap in the callee's locals; the caller's array is restored below. */
6766 	prev_locals = cfg->locals;
6767 	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6768 	for (i = 0; i < cheader->num_locals; ++i)
6769 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6771 	/* allocate start and end blocks */
6772 	/* This is needed so if the inline is aborted, we can clean up */
6773 	NEW_BBLOCK (cfg, sbblock);
6774 	sbblock->real_offset = real_offset;
6776 	NEW_BBLOCK (cfg, ebblock);
6777 	ebblock->block_num = cfg->num_bblocks++;
6778 	ebblock->real_offset = real_offset;
	/* Save caller compilation state so it can be restored after the
	 * recursive mono_method_to_ir () call. */
6780 	prev_args = cfg->args;
6781 	prev_arg_types = cfg->arg_types;
6782 	prev_inlined_method = cfg->inlined_method;
6783 	cfg->inlined_method = cmethod;
6784 	cfg->ret_var_set = FALSE;
6785 	cfg->inline_depth ++;
6786 	prev_real_offset = cfg->real_offset;
6787 	prev_cbb_hash = cfg->cbb_hash;
6788 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6789 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6790 	prev_cil_start = cfg->cil_start;
6791 	prev_cbb = cfg->cbb;
6792 	prev_current_method = cfg->current_method;
6793 	prev_generic_context = cfg->generic_context;
6794 	prev_ret_var_set = cfg->ret_var_set;
6795 	prev_disable_inline = cfg->disable_inline;
6797 	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6800 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6802 	ret_var_set = cfg->ret_var_set;
	/* Restore caller state regardless of whether the inline succeeded. */
6804 	cfg->inlined_method = prev_inlined_method;
6805 	cfg->real_offset = prev_real_offset;
6806 	cfg->cbb_hash = prev_cbb_hash;
6807 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6808 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6809 	cfg->cil_start = prev_cil_start;
6810 	cfg->locals = prev_locals;
6811 	cfg->args = prev_args;
6812 	cfg->arg_types = prev_arg_types;
6813 	cfg->current_method = prev_current_method;
6814 	cfg->generic_context = prev_generic_context;
6815 	cfg->ret_var_set = prev_ret_var_set;
6816 	cfg->disable_inline = prev_disable_inline;
6817 	cfg->inline_depth --;
	/* Accept the inline when the callee was cheap enough (cost < 60) or
	 * inlining was forced. NOTE(review): threshold is hard-coded here,
	 * independent of INLINE_LENGTH_LIMIT. */
6819 	if ((costs >= 0 && costs < 60) || inline_always) {
6820 		if (cfg->verbose_level > 2)
6821 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6823 		cfg->stat_inlined_methods++;
6825 		/* always add some code to avoid block split failures */
6826 		MONO_INST_NEW (cfg, ins, OP_NOP);
6827 		MONO_ADD_INS (prev_cbb, ins);
6829 		prev_cbb->next_bb = sbblock;
6830 		link_bblock (cfg, prev_cbb, sbblock);
6833 		 * Get rid of the begin and end bblocks if possible to aid local
6836 		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6838 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6839 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6841 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6842 			MonoBasicBlock *prev = ebblock->in_bb [0];
6843 			mono_merge_basic_blocks (cfg, prev, ebblock);
6845 			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6846 				mono_merge_basic_blocks (cfg, prev_cbb, prev);
6847 				cfg->cbb = prev_cbb;
6851 		 * It's possible that the rvar is set in some prev bblock, but not in others.
6857 		for (i = 0; i < ebblock->in_count; ++i) {
6858 			bb = ebblock->in_bb [i];
6860 			if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6863 				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6871 		*out_cbb = cfg->cbb;
6875 			 * If the inlined method contains only a throw, then the ret var is not
6876 			 * set, so set it to a dummy value.
6879 				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6881 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6884 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6887 		if (cfg->verbose_level > 2)
6888 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6889 		cfg->exception_type = MONO_EXCEPTION_NONE;
6890 		mono_loader_clear_error ();
6892 		/* This gets rid of the newly added bblocks */
6893 		cfg->cbb = prev_cbb;
6895 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6900 * Some of these comments may well be out-of-date.
6901 * Design decisions: we do a single pass over the IL code (and we do bblock
6902 * splitting/merging in the few cases when it's required: a back jump to an IL
6903 * address that was not already seen as bblock starting point).
6904 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6905 * Complex operations are decomposed in simpler ones right away. We need to let the
6906 * arch-specific code peek and poke inside this process somehow (except when the
6907 * optimizations can take advantage of the full semantic info of coarse opcodes).
6908 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6909 * MonoInst->opcode initially is the IL opcode or some simplification of that
6910 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6911 * opcode with value bigger than OP_LAST.
6912 * At this point the IR can be handed over to an interpreter, a dumb code generator
6913 * or to the optimizing code generator that will translate it to SSA form.
6915 * Profiling directed optimizations.
6916 * We may compile by default with few or no optimizations and instrument the code
6917 * or the user may indicate what methods to optimize the most either in a config file
6918 * or through repeated runs where the compiler applies offline the optimizations to
6919 * each method and then decides if it was worth it.
/* Verification helper macros used throughout mono_method_to_ir ().
 * Each one jumps to the UNVERIFIED/TYPE_LOAD_ERROR handler when its
 * condition fails; they rely on locals (sp, stack_start, header, ip, end,
 * num_args) being in scope at the expansion site. */
6922 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6923 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6924 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6925 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6926 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6927 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6928 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6929 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6931 /* offset from br.s -> br like opcodes */
6932 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE when IL address IP belongs to bblock BB, i.e. the
 *   cil_offset_to_bb map has no entry for IP (no bblock starts there)
 *   or maps it to BB itself.
 */
6935 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6937 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6939 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [START, END): decode each opcode and
 *   create bblocks (via GET_BBLOCK) at every branch target, at the
 *   instruction following each branch, and at every switch target, so
 *   the main translation pass can assume all leaders exist. Also marks
 *   the bblock containing a CEE_THROW as out-of-line (cold).
 */
6943 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6945 	unsigned char *ip = start;
6946 	unsigned char *target;
6949 	MonoBasicBlock *bblock;
6950 	const MonoOpcode *opcode;
6953 		cli_addr = ip - start;
6954 		i = mono_opcode_value ((const guint8 **)&ip, end);
6957 		opcode = &mono_opcodes [i];
		/* Advance IP by the operand size for this opcode class and
		 * register bblock leaders for branch-like operands. */
6958 		switch (opcode->argument) {
6959 		case MonoInlineNone:
6962 		case MonoInlineString:
6963 		case MonoInlineType:
6964 		case MonoInlineField:
6965 		case MonoInlineMethod:
6968 		case MonoShortInlineR:
6975 		case MonoShortInlineVar:
6976 		case MonoShortInlineI:
6979 		case MonoShortInlineBrTarget:
			/* 8-bit signed branch displacement, relative to next ins. */
6980 			target = start + cli_addr + 2 + (signed char)ip [1];
6981 			GET_BBLOCK (cfg, bblock, target);
6984 			GET_BBLOCK (cfg, bblock, ip);
6986 		case MonoInlineBrTarget:
			/* 32-bit signed branch displacement, relative to next ins. */
6987 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6988 			GET_BBLOCK (cfg, bblock, target);
6991 			GET_BBLOCK (cfg, bblock, ip);
6993 		case MonoInlineSwitch: {
6994 			guint32 n = read32 (ip + 1);
			/* Fall-through target sits after the n-entry jump table. */
6997 			cli_addr += 5 + 4 * n;
6998 			target = start + cli_addr;
6999 			GET_BBLOCK (cfg, bblock, target);
7001 			for (j = 0; j < n; ++j) {
7002 				target = start + cli_addr + (gint32)read32 (ip);
7003 				GET_BBLOCK (cfg, bblock, target);
7013 			g_assert_not_reached ();
7016 		if (i == CEE_THROW) {
7017 			unsigned char *bb_start = ip - 1;
7019 			/* Find the start of the bblock containing the throw */
7021 			while ((bb_start >= start) && !bblock) {
7022 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
			/* Throwing blocks are cold: move them out of line. */
7026 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 *   methods the target is fetched from the wrapper data (and inflated when
 *   a generic CONTEXT is given); otherwise it is looked up through the
 *   image's metadata. Open constructed types are allowed (cf. mini_get_method).
 */
7036 static inline MonoMethod *
7037 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7041 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
7042 		method = mono_method_get_wrapper_data (m, token);
7045 			method = mono_class_inflate_generic_method_checked (method, context, &error);
7046 			g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7049 		method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the elided
 *   branch presumably returns NULL/error — confirm against full source).
 */
7055 static inline MonoMethod *
7056 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7058 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7060 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass for METHOD: from wrapper data (inflated
 *   with CONTEXT) for wrappers, otherwise via the metadata typespec lookup.
 *   The class is initialized before being returned.
 */
7066 static inline MonoClass*
7067 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7072 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
7073 		klass = mono_method_get_wrapper_data (method, token);
7075 			klass = mono_class_inflate_generic_class (klass, context);
7077 		klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7078 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
7081 		mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature for METHOD: from wrapper data
 *   (inflated with CONTEXT when present) for wrappers, otherwise parsed
 *   from the image's metadata.
 */
7085 static inline MonoMethodSignature*
7086 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7088 	MonoMethodSignature *fsig;
7090 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
7093 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7095 			fsig = mono_inflate_generic_signature (fsig, context, &error);
7097 			g_assert (mono_error_ok (&error));
7100 		fsig = mono_metadata_parse_signature (method->klass->image, token);
7106 * Returns TRUE if the JIT should abort inlining because "callee"
7107 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when CALLEE carries declarative security and
 *   we are inlining (cfg->method != caller). For an ECMA link demand,
 *   inline code that throws a SecurityException is emitted; for other
 *   failures the cfg exception is set (without clobbering a prior one).
 */
7110 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7114 	if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7118 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7119 	if (result == MONO_JIT_SECURITY_OK)
7122 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
7123 		/* Generate code to throw a SecurityException before the actual call/link */
7124 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
7127 		NEW_ICONST (cfg, args [0], 4);
7128 		NEW_METHODCONST (cfg, args [1], caller);
7129 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7130 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7131 		/* don't hide previous results */
7132 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7133 		cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving and caching) the managed
 *   SecurityManager.ThrowException (1-arg) helper method.
 *   NOTE(review): the cached static is not guarded — presumably only
 *   reached under the JIT's existing locking; confirm.
 */
7141 throw_exception (void)
7143 	static MonoMethod *method = NULL;
7146 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
7147 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) so the given
 *   pre-created exception object is thrown at runtime.
 */
7154 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7156 	MonoMethod *thrower = throw_exception ();
7159 	EMIT_NEW_PCONST (cfg, args [0], ex);
7160 	mono_emit_method_call (cfg, thrower, args, NULL);
7164 * Return the original method is a wrapper is specified. We can only access
7165 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Unwrap METHOD to the method carrying the custom attributes:
 *   non-wrappers are returned as-is, native-to-managed wrappers are kept
 *   (native code may call any managed method), all other wrappers are
 *   mapped back via mono_marshal_method_from_wrapper ().
 */
7168 get_original_method (MonoMethod *method)
7170 	if (method->wrapper_type == MONO_WRAPPER_NONE)
7173 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7174 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7177 	/* in other cases we need to find the original method */
7178 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: if the (unwrapped) CALLER
 *   may not access FIELD, emit code that throws the returned exception.
 */
7182 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7183 					  MonoBasicBlock *bblock, unsigned char *ip)
7185 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
7186 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7188 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls: if the (unwrapped) CALLER may not
 *   call CALLEE, emit code that throws the returned exception.
 */
7192 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7193 					 MonoBasicBlock *bblock, unsigned char *ip)
7195 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
7196 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7198 		emit_throw_exception (cfg, ex);
7202 * Check that the IL instructions at ip are the array initialization
7203 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" sequence following a newarr of KLASS
 *   with LEN elements. On a match, return a pointer to the static blob
 *   (or its RVA wrapped with GUINT_TO_POINTER when AOT compiling),
 *   setting *OUT_SIZE and *OUT_FIELD_TOKEN. Big-endian targets bail out
 *   for multi-byte element types since the blob is stored little-endian.
 */
7206 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7209 	 * newarr[System.Int32]
7211 	 * ldtoken field valuetype ...
7212 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip [5] == 0x4 checks the ldtoken operand's token-type byte. */
7214 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7216 		guint32 token = read32 (ip + 7);
7217 		guint32 field_token = read32 (ip + 2);
7218 		guint32 field_index = field_token & 0xffffff;
7220 		const char *data_ptr;
7222 		MonoMethod *cmethod;
7223 		MonoClass *dummy_class;
7224 		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7228 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
7232 		*out_field_token = field_token;
7234 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
7237 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7239 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7240 		case MONO_TYPE_BOOLEAN:
7244 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7245 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7246 		case MONO_TYPE_CHAR:
		/* The blob must be at least as large as the computed array size. */
7263 		if (size > mono_type_size (field->type, &dummy_align))
7266 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7267 		if (!image_is_dynamic (method->klass->image)) {
7268 			field_index = read32 (ip + 2) & 0xffffff;
7269 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7270 			data_ptr = mono_image_rva_map (method->klass->image, rva);
7271 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7272 			/* for aot code we do the lookup on load */
7273 			if (aot && data_ptr)
7274 				return GUINT_TO_POINTER (rva);
7276 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7278 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the bad IL at IP
 *   in METHOD, including a disassembly of the offending instruction (or a
 *   note that the body is empty). The header is queued for deferred free.
 */
7286 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7288 	char *method_fname = mono_method_full_name (method, TRUE);
7290 	MonoMethodHeader *header = mono_method_get_header (method);
7292 	if (header->code_size == 0)
7293 		method_code = g_strdup ("method body is empty.");
7295 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7296 	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7297 	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7298 	g_free (method_fname);
7299 	g_free (method_code);
7300 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Abort compilation with a pre-built managed exception object; the
 *   object is GC-rooted so it survives until it is thrown.
 */
7304 set_exception_object (MonoCompile *cfg, MonoException *exception)
7306 	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7307 	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7308 	cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the stack top *SP into IL local N. When the store is
 *   a plain register move and the value was just produced by a constant
 *   instruction, retarget that instruction's dreg instead of emitting a
 *   separate move.
 */
7312 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7315 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7316 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7317 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7318 		/* Optimize reg-reg moves away */
7320 		 * Can't optimize other opcodes, since sp[0] might point to
7321 		 * the last ins of a decomposed opcode.
7323 		sp [0]->dreg = (cfg)->locals [n]->dreg;
7325 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7330 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the
 *   local's address, directly zero-initialize the local and skip both
 *   instructions. Returns the advanced ip on success (elided here),
 *   letting the caller continue after the consumed sequence.
 */
7333 static inline unsigned char *
7334 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7344 		local = read16 (ip + 2);
7348 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7349 		/* From the INITOBJ case */
7350 		token = read32 (ip + 2);
7351 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7352 		CHECK_TYPELOAD (klass);
7353 		type = mini_replace_type (&klass->byval_arg);
7354 		emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Returns whether CLASS is System.Exception or derives from it, by
 *   walking up the parent chain.
 */
7362 is_exception_class (MonoClass *class)
7365 		if (class == mono_defaults.exception_class)
7367 		class = class->parent;
7373 * is_jit_optimizer_disabled:
7375 *   Determine whenever M's assembly has a DebuggableAttribute with the
7376 *   IsJITOptimizerDisabled flag set.
/* The result is computed once per assembly and cached on it
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value write before the inited flag. */
7379 is_jit_optimizer_disabled (MonoMethod *m)
7381 	MonoAssembly *ass = m->klass->image->assembly;
7382 	MonoCustomAttrInfo* attrs;
7383 	static MonoClass *klass;
7385 	gboolean val = FALSE;
7388 	if (ass->jit_optimizer_disabled_inited)
7389 		return ass->jit_optimizer_disabled;
7392 		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
		/* No DebuggableAttribute type at all: optimizer stays enabled. */
7395 		ass->jit_optimizer_disabled = FALSE;
7396 		mono_memory_barrier ();
7397 		ass->jit_optimizer_disabled_inited = TRUE;
7401 	attrs = mono_custom_attrs_from_assembly (ass);
7403 		for (i = 0; i < attrs->num_attrs; ++i) {
7404 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
7407 			MonoMethodSignature *sig;
7409 			if (!attr->ctor || attr->ctor->klass != klass)
7411 			/* Decode the attribute. See reflection.c */
7412 			len = attr->data_size;
7413 			p = (const char*)attr->data;
			/* Custom attribute blobs start with prolog 0x0001. */
7414 			g_assert (read16 (p) == 0x0001);
7417 			// FIXME: Support named parameters
7418 			sig = mono_method_signature (attr->ctor);
7419 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7421 			/* Two boolean arguments */
7425 		mono_custom_attrs_free (attrs);
7428 	ass->jit_optimizer_disabled = val;
7429 	mono_memory_barrier ();
7430 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether the call METHOD -> CMETHOD (signature FSIG, IL opcode
 *   CALL_OPCODE) can be compiled as a tail call. Starts from the
 *   arch-specific or signature-equality check, then vetoes cases where
 *   callee arguments could reference the caller's stack frame, P/Invokes,
 *   LMF-saving callers, most wrappers, and anything but plain CEE_CALL.
 */
7436 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7438 	gboolean supported_tail_call;
7441 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7442 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7444 	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7447 	for (i = 0; i < fsig->param_count; ++i) {
7448 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7449 			/* These can point to the current method's stack */
7450 			supported_tail_call = FALSE;
7452 	if (fsig->hasthis && cmethod->klass->valuetype)
7453 		/* this might point to the current method's stack */
7454 		supported_tail_call = FALSE;
7455 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7456 		supported_tail_call = FALSE;
7457 	if (cfg->method->save_lmf)
7458 		supported_tail_call = FALSE;
7459 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7460 		supported_tail_call = FALSE;
7461 	if (call_opcode != CEE_CALL)
7462 		supported_tail_call = FALSE;
7464 	/* Debugging support */
7466 	if (supported_tail_call) {
7467 		if (!mono_debug_count ())
7468 			supported_tail_call = FALSE;
7472 	return supported_tail_call;
7475 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7476  * it to the thread local value based on the tls_offset field. Every other kind of access to
7477  * the field causes an assert.
/*
 * is_magic_tls_access:
 *   Returns whether FIELD is exactly corlib's ThreadLocal`1.tlsdata —
 *   the field whose ldflda the JIT rewrites as described above.
 */
7480 is_magic_tls_access (MonoClassField *field)
7482 	if (strcmp (field->name, "tlsdata"))
7484 	if (strcmp (field->parent->name, "ThreadLocal`1"))
7486 	return field->parent->image == mono_defaults.corlib;
7489 /* emits the code needed to access a managed tls var (like ThreadStatic)
7490  * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7491  * pointer for the current thread.
7492  * Returns the MonoInst* representing the address of the tls var.
7495 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7498 	int static_data_reg, array_reg, dreg;
7499 	int offset2_reg, idx_reg;
7500 	// inlined access to the tls data
7501 	// idx = (offset >> 24) - 1;
7502 	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7503 	static_data_reg = alloc_ireg (cfg);
7504 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
	/* idx = (offset >> 24) - 1, then scale by pointer size to index
	 * into the static_data pointer array. */
7505 	idx_reg = alloc_ireg (cfg);
7506 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7507 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7508 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7509 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7510 	array_reg = alloc_ireg (cfg);
7511 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	/* Low 24 bits of the offset select the slot within the chunk. */
7512 	offset2_reg = alloc_ireg (cfg);
7513 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7514 	dreg = alloc_ireg (cfg);
7515 	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7520 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7521 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *   Compute (and cache in *CACHED_TLS_ADDR, a temp local) the address of
 *   the thread-local slot backing TLS_FIELD. The tls_offset field is
 *   loaded from the ThreadLocal<T> instance, the current MonoInternalThread
 *   is obtained via the intrinsic or a fallback call, and the address is
 *   built by emit_managed_static_data_access ().
 */
7524 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7526 	MonoInst *load, *addr, *temp, *store, *thread_ins;
7527 	MonoClassField *offset_field;
7529 	if (*cached_tls_addr) {
		/* Already computed for this method: just reload the temp. */
7530 		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7533 	thread_ins = mono_get_thread_intrinsic (cfg);
7534 	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7536 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7538 		MONO_ADD_INS (cfg->cbb, thread_ins);
		/* No intrinsic on this arch: call the icall instead. */
7540 		MonoMethod *thread_method;
7541 		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7542 		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7544 	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7545 	addr->klass = mono_class_from_mono_type (tls_field->type);
7546 	addr->type = STACK_MP;
7547 	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7548 	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7550 	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7557 * Handle calls made to ctors from NEWOBJ opcodes.
7559 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 *   Emit the constructor invocation for a NEWOBJ. Depending on generic
 *   sharing and context it may pass an rgctx/vtable argument, use a ctor
 *   intrinsic, inline the ctor body (updating *INLINE_COSTS and
 *   *REF_BBLOCK), go through a gsharedvt out trampoline, make an indirect
 *   rgctx call, or fall back to a plain direct call.
 */
7562 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7563 				  MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7565 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7566 	MonoBasicBlock *bblock = *ref_bblock;
7568 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7569 			mono_method_is_generic_sharable (cmethod, TRUE)) {
		/* Shared generic valuetype ctor: pass the method rgctx or the
		 * class vtable as a hidden argument. */
7570 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7571 			mono_class_vtable (cfg->domain, cmethod->klass);
7572 			CHECK_TYPELOAD (cmethod->klass);
7574 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
7575 												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7578 				vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7579 												   cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7581 				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7583 				CHECK_TYPELOAD (cmethod->klass);
7584 				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7589 	/* Avoid virtual calls to ctors if possible */
7590 	if (mono_class_is_marshalbyref (cmethod->klass))
7591 		callvirt_this_arg = sp [0];
7593 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7594 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7595 		CHECK_CFG_EXCEPTION;
7596 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
	/* Ctors of exception subclasses are never inlined. */
7597 	    mono_method_check_inlining (cfg, cmethod) &&
7598 	    !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7601 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7602 			cfg->real_offset += 5;
			/* Account for the inlined body minus the call it replaced. */
7604 			*inline_costs += costs - 5;
7605 			*ref_bblock = bblock;
7607 			INLINE_FAILURE ("inline failure");
7608 			// FIXME-VT: Clean this up
7609 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7610 				GSHAREDVT_FAILURE(*ip);
7611 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7613 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7616 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7617 		mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7618 	} else if (context_used &&
7619 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7620 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7621 		MonoInst *cmethod_addr;
7623 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7625 		cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7626 											  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7628 		mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7630 		INLINE_FAILURE ("ctor call");
7631 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7632 										  callvirt_this_arg, NULL, vtable_arg);
7639 * mono_method_to_ir:
7641 * Translate the .net IL into linear IR.
7644 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7645 MonoInst *return_var, MonoInst **inline_args,
7646 guint inline_offset, gboolean is_virtual_call)
7649 MonoInst *ins, **sp, **stack_start;
7650 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7651 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7652 MonoMethod *cmethod, *method_definition;
7653 MonoInst **arg_array;
7654 MonoMethodHeader *header;
7656 guint32 token, ins_flag;
7658 MonoClass *constrained_call = NULL;
7659 unsigned char *ip, *end, *target, *err_pos;
7660 MonoMethodSignature *sig;
7661 MonoGenericContext *generic_context = NULL;
7662 MonoGenericContainer *generic_container = NULL;
7663 MonoType **param_types;
7664 int i, n, start_new_bblock, dreg;
7665 int num_calls = 0, inline_costs = 0;
7666 int breakpoint_id = 0;
7668 MonoBoolean security, pinvoke;
7669 MonoSecurityManager* secman = NULL;
7670 MonoDeclSecurityActions actions;
7671 GSList *class_inits = NULL;
7672 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7674 gboolean init_locals, seq_points, skip_dead_blocks;
7675 gboolean sym_seq_points = FALSE;
7676 MonoInst *cached_tls_addr = NULL;
7677 MonoDebugMethodInfo *minfo;
7678 MonoBitSet *seq_point_locs = NULL;
7679 MonoBitSet *seq_point_set_locs = NULL;
7681 cfg->disable_inline = is_jit_optimizer_disabled (method);
7683 /* serialization and xdomain stuff may need access to private fields and methods */
7684 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7685 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7686 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7687 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7688 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7689 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7691 dont_verify |= mono_security_smcs_hack_enabled ();
7693 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7694 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7695 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7696 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7697 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7699 image = method->klass->image;
7700 header = mono_method_get_header (method);
7702 MonoLoaderError *error;
7704 if ((error = mono_loader_get_last_error ())) {
7705 mono_cfg_set_exception (cfg, error->exception_type);
7707 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7708 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7710 goto exception_exit;
7712 generic_container = mono_method_get_generic_container (method);
7713 sig = mono_method_signature (method);
7714 num_args = sig->hasthis + sig->param_count;
7715 ip = (unsigned char*)header->code;
7716 cfg->cil_start = ip;
7717 end = ip + header->code_size;
7718 cfg->stat_cil_code_size += header->code_size;
7720 seq_points = cfg->gen_seq_points && cfg->method == method;
7721 #ifdef PLATFORM_ANDROID
7722 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7725 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7726 /* We could hit a seq point before attaching to the JIT (#8338) */
7730 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7731 minfo = mono_debug_lookup_method (method);
7733 int i, n_il_offsets;
7737 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7738 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7739 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7740 sym_seq_points = TRUE;
7741 for (i = 0; i < n_il_offsets; ++i) {
7742 if (il_offsets [i] < header->code_size)
7743 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7745 g_free (il_offsets);
7746 g_free (line_numbers);
7747 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7748 /* Methods without line number info like auto-generated property accessors */
7749 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7750 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7751 sym_seq_points = TRUE;
7756 * Methods without init_locals set could cause asserts in various passes
7757 * (#497220). To work around this, we emit dummy initialization opcodes
7758 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7759 * on some platforms.
7761 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7762 init_locals = header->init_locals;
7766 method_definition = method;
7767 while (method_definition->is_inflated) {
7768 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7769 method_definition = imethod->declaring;
7772 /* SkipVerification is not allowed if core-clr is enabled */
7773 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7775 dont_verify_stloc = TRUE;
7778 if (sig->is_inflated)
7779 generic_context = mono_method_get_context (method);
7780 else if (generic_container)
7781 generic_context = &generic_container->context;
7782 cfg->generic_context = generic_context;
7784 if (!cfg->generic_sharing_context)
7785 g_assert (!sig->has_type_parameters);
7787 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7788 g_assert (method->is_inflated);
7789 g_assert (mono_method_get_context (method)->method_inst);
7791 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7792 g_assert (sig->generic_param_count);
7794 if (cfg->method == method) {
7795 cfg->real_offset = 0;
7797 cfg->real_offset = inline_offset;
7800 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7801 cfg->cil_offset_to_bb_len = header->code_size;
7803 cfg->current_method = method;
7805 if (cfg->verbose_level > 2)
7806 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7808 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7810 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7811 for (n = 0; n < sig->param_count; ++n)
7812 param_types [n + sig->hasthis] = sig->params [n];
7813 cfg->arg_types = param_types;
7815 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7816 if (cfg->method == method) {
7818 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7819 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7822 NEW_BBLOCK (cfg, start_bblock);
7823 cfg->bb_entry = start_bblock;
7824 start_bblock->cil_code = NULL;
7825 start_bblock->cil_length = 0;
7826 #if defined(__native_client_codegen__)
7827 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7828 ins->dreg = alloc_dreg (cfg, STACK_I4);
7829 MONO_ADD_INS (start_bblock, ins);
7833 NEW_BBLOCK (cfg, end_bblock);
7834 cfg->bb_exit = end_bblock;
7835 end_bblock->cil_code = NULL;
7836 end_bblock->cil_length = 0;
7837 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7838 g_assert (cfg->num_bblocks == 2);
7840 arg_array = cfg->args;
7842 if (header->num_clauses) {
7843 cfg->spvars = g_hash_table_new (NULL, NULL);
7844 cfg->exvars = g_hash_table_new (NULL, NULL);
7846 /* handle exception clauses */
7847 for (i = 0; i < header->num_clauses; ++i) {
7848 MonoBasicBlock *try_bb;
7849 MonoExceptionClause *clause = &header->clauses [i];
7850 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7851 try_bb->real_offset = clause->try_offset;
7852 try_bb->try_start = TRUE;
7853 try_bb->region = ((i + 1) << 8) | clause->flags;
7854 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7855 tblock->real_offset = clause->handler_offset;
7856 tblock->flags |= BB_EXCEPTION_HANDLER;
7859 * Linking the try block with the EH block hinders inlining as we won't be able to
7860 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7862 if (COMPILE_LLVM (cfg))
7863 link_bblock (cfg, try_bb, tblock);
7865 if (*(ip + clause->handler_offset) == CEE_POP)
7866 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7868 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7869 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7870 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7871 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7872 MONO_ADD_INS (tblock, ins);
7874 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7875 /* finally clauses already have a seq point */
7876 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7877 MONO_ADD_INS (tblock, ins);
7880 /* todo: is a fault block unsafe to optimize? */
7881 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7882 tblock->flags |= BB_EXCEPTION_UNSAFE;
7886 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7888 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7890 /* catch and filter blocks get the exception object on the stack */
7891 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7892 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7893 MonoInst *dummy_use;
7895 /* mostly like handle_stack_args (), but just sets the input args */
7896 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7897 tblock->in_scount = 1;
7898 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7899 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7902 * Add a dummy use for the exvar so its liveness info will be
7906 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7908 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7909 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7910 tblock->flags |= BB_EXCEPTION_HANDLER;
7911 tblock->real_offset = clause->data.filter_offset;
7912 tblock->in_scount = 1;
7913 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7914 /* The filter block shares the exvar with the handler block */
7915 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7916 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7917 MONO_ADD_INS (tblock, ins);
7921 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7922 clause->data.catch_class &&
7923 cfg->generic_sharing_context &&
7924 mono_class_check_context_used (clause->data.catch_class)) {
7926 * In shared generic code with catch
7927 * clauses containing type variables
7928 * the exception handling code has to
7929 * be able to get to the rgctx.
7930 * Therefore we have to make sure that
7931 * the vtable/mrgctx argument (for
7932 * static or generic methods) or the
7933 * "this" argument (for non-static
7934 * methods) are live.
7936 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7937 mini_method_get_context (method)->method_inst ||
7938 method->klass->valuetype) {
7939 mono_get_vtable_var (cfg);
7941 MonoInst *dummy_use;
7943 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7948 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7949 cfg->cbb = start_bblock;
7950 cfg->args = arg_array;
7951 mono_save_args (cfg, sig, inline_args);
7954 /* FIRST CODE BLOCK */
7955 NEW_BBLOCK (cfg, bblock);
7956 bblock->cil_code = ip;
7960 ADD_BBLOCK (cfg, bblock);
7962 if (cfg->method == method) {
7963 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7964 if (breakpoint_id) {
7965 MONO_INST_NEW (cfg, ins, OP_BREAK);
7966 MONO_ADD_INS (bblock, ins);
7970 if (mono_security_cas_enabled ())
7971 secman = mono_security_manager_get_methods ();
7973 security = (secman && mono_security_method_has_declsec (method));
7974 /* at this point having security doesn't mean we have any code to generate */
7975 if (security && (cfg->method == method)) {
7976 /* Only Demand, NonCasDemand and DemandChoice require code generation.
7977 * And we do not want to enter the next section (with allocation) if we
7978 * have nothing to generate */
7979 security = mono_declsec_get_demands (method, &actions);
7982 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7983 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7985 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7986 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7987 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7989 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7990 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7994 mono_custom_attrs_free (custom);
7997 custom = mono_custom_attrs_from_class (wrapped->klass);
7998 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8002 mono_custom_attrs_free (custom);
8005 /* not a P/Invoke after all */
8010 /* we use a separate basic block for the initialization code */
8011 NEW_BBLOCK (cfg, init_localsbb);
8012 cfg->bb_init = init_localsbb;
8013 init_localsbb->real_offset = cfg->real_offset;
8014 start_bblock->next_bb = init_localsbb;
8015 init_localsbb->next_bb = bblock;
8016 link_bblock (cfg, start_bblock, init_localsbb);
8017 link_bblock (cfg, init_localsbb, bblock);
8019 cfg->cbb = init_localsbb;
8021 if (cfg->gsharedvt && cfg->method == method) {
8022 MonoGSharedVtMethodInfo *info;
8023 MonoInst *var, *locals_var;
8026 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8027 info->method = cfg->method;
8028 info->count_entries = 16;
8029 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8030 cfg->gsharedvt_info = info;
8032 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8033 /* prevent it from being register allocated */
8034 //var->flags |= MONO_INST_VOLATILE;
8035 cfg->gsharedvt_info_var = var;
8037 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8038 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8040 /* Allocate locals */
8041 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8042 /* prevent it from being register allocated */
8043 //locals_var->flags |= MONO_INST_VOLATILE;
8044 cfg->gsharedvt_locals_var = locals_var;
8046 dreg = alloc_ireg (cfg);
8047 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8049 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8050 ins->dreg = locals_var->dreg;
8052 MONO_ADD_INS (cfg->cbb, ins);
8053 cfg->gsharedvt_locals_var_ins = ins;
8055 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8058 ins->flags |= MONO_INST_INIT;
8062 /* at this point we know, if security is TRUE, that some code needs to be generated */
8063 if (security && (cfg->method == method)) {
8066 cfg->stat_cas_demand_generation++;
8068 if (actions.demand.blob) {
8069 /* Add code for SecurityAction.Demand */
8070 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8071 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8072 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8073 mono_emit_method_call (cfg, secman->demand, args, NULL);
8075 if (actions.noncasdemand.blob) {
8076 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8077 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8078 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8079 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8080 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8081 mono_emit_method_call (cfg, secman->demand, args, NULL);
8083 if (actions.demandchoice.blob) {
8084 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8085 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8086 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8087 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8088 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8092 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8094 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8097 if (mono_security_core_clr_enabled ()) {
8098 /* check if this is native code, e.g. an icall or a p/invoke */
8099 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8100 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8102 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8103 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8105 /* if this is a native call then it can only be JITted from platform code */
8106 if ((icall || pinvk) && method->klass && method->klass->image) {
8107 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8108 MonoException *ex = icall ? mono_get_exception_security () :
8109 mono_get_exception_method_access ();
8110 emit_throw_exception (cfg, ex);
8117 CHECK_CFG_EXCEPTION;
8119 if (header->code_size == 0)
8122 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8127 if (cfg->method == method)
8128 mono_debug_init_method (cfg, bblock, breakpoint_id);
8130 for (n = 0; n < header->num_locals; ++n) {
8131 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8136 /* We force the vtable variable here for all shared methods
8137 for the possibility that they might show up in a stack
8138 trace where their exact instantiation is needed. */
8139 if (cfg->generic_sharing_context && method == cfg->method) {
8140 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8141 mini_method_get_context (method)->method_inst ||
8142 method->klass->valuetype) {
8143 mono_get_vtable_var (cfg);
8145 /* FIXME: Is there a better way to do this?
8146 We need the variable live for the duration
8147 of the whole method. */
8148 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8152 /* add a check for this != NULL to inlined methods */
8153 if (is_virtual_call) {
8156 NEW_ARGLOAD (cfg, arg_ins, 0);
8157 MONO_ADD_INS (cfg->cbb, arg_ins);
8158 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8161 skip_dead_blocks = !dont_verify;
8162 if (skip_dead_blocks) {
8163 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8168 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8169 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8172 start_new_bblock = 0;
8175 if (cfg->method == method)
8176 cfg->real_offset = ip - header->code;
8178 cfg->real_offset = inline_offset;
8183 if (start_new_bblock) {
8184 bblock->cil_length = ip - bblock->cil_code;
8185 if (start_new_bblock == 2) {
8186 g_assert (ip == tblock->cil_code);
8188 GET_BBLOCK (cfg, tblock, ip);
8190 bblock->next_bb = tblock;
8193 start_new_bblock = 0;
8194 for (i = 0; i < bblock->in_scount; ++i) {
8195 if (cfg->verbose_level > 3)
8196 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8197 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8201 g_slist_free (class_inits);
8204 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8205 link_bblock (cfg, bblock, tblock);
8206 if (sp != stack_start) {
8207 handle_stack_args (cfg, stack_start, sp - stack_start);
8209 CHECK_UNVERIFIABLE (cfg);
8211 bblock->next_bb = tblock;
8214 for (i = 0; i < bblock->in_scount; ++i) {
8215 if (cfg->verbose_level > 3)
8216 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8217 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8220 g_slist_free (class_inits);
8225 if (skip_dead_blocks) {
8226 int ip_offset = ip - header->code;
8228 if (ip_offset == bb->end)
8232 int op_size = mono_opcode_size (ip, end);
8233 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8235 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8237 if (ip_offset + op_size == bb->end) {
8238 MONO_INST_NEW (cfg, ins, OP_NOP);
8239 MONO_ADD_INS (bblock, ins);
8240 start_new_bblock = 1;
8248 * Sequence points are points where the debugger can place a breakpoint.
8249 * Currently, we generate these automatically at points where the IL
8252 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8254 * Make methods interruptable at the beginning, and at the targets of
8255 * backward branches.
8256 * Also, do this at the start of every bblock in methods with clauses too,
8257 * to be able to handle instructions with imprecise control flow like
8259 * Backward branches are handled at the end of method-to-ir ().
8261 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8263 /* Avoid sequence points on empty IL like .volatile */
8264 // FIXME: Enable this
8265 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8266 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8267 if (sp != stack_start)
8268 ins->flags |= MONO_INST_NONEMPTY_STACK;
8269 MONO_ADD_INS (cfg->cbb, ins);
8272 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8275 bblock->real_offset = cfg->real_offset;
8277 if ((cfg->method == method) && cfg->coverage_info) {
8278 guint32 cil_offset = ip - header->code;
8279 cfg->coverage_info->data [cil_offset].cil_code = ip;
8281 /* TODO: Use an increment here */
8282 #if defined(TARGET_X86)
8283 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8284 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8286 MONO_ADD_INS (cfg->cbb, ins);
8288 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8289 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8293 if (cfg->verbose_level > 3)
8294 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8298 if (seq_points && !sym_seq_points && sp != stack_start) {
8300 * The C# compiler uses these nops to notify the JIT that it should
8301 * insert seq points.
8303 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8304 MONO_ADD_INS (cfg->cbb, ins);
8306 if (cfg->keep_cil_nops)
8307 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8309 MONO_INST_NEW (cfg, ins, OP_NOP);
8311 MONO_ADD_INS (bblock, ins);
8314 if (should_insert_brekpoint (cfg->method)) {
8315 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8317 MONO_INST_NEW (cfg, ins, OP_NOP);
8320 MONO_ADD_INS (bblock, ins);
8326 CHECK_STACK_OVF (1);
8327 n = (*ip)-CEE_LDARG_0;
8329 EMIT_NEW_ARGLOAD (cfg, ins, n);
8337 CHECK_STACK_OVF (1);
8338 n = (*ip)-CEE_LDLOC_0;
8340 EMIT_NEW_LOCLOAD (cfg, ins, n);
8349 n = (*ip)-CEE_STLOC_0;
8352 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8354 emit_stloc_ir (cfg, sp, header, n);
8361 CHECK_STACK_OVF (1);
8364 EMIT_NEW_ARGLOAD (cfg, ins, n);
8370 CHECK_STACK_OVF (1);
8373 NEW_ARGLOADA (cfg, ins, n);
8374 MONO_ADD_INS (cfg->cbb, ins);
8384 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8386 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8391 CHECK_STACK_OVF (1);
8394 EMIT_NEW_LOCLOAD (cfg, ins, n);
8398 case CEE_LDLOCA_S: {
8399 unsigned char *tmp_ip;
8401 CHECK_STACK_OVF (1);
8402 CHECK_LOCAL (ip [1]);
8404 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8410 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8419 CHECK_LOCAL (ip [1]);
8420 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8422 emit_stloc_ir (cfg, sp, header, ip [1]);
8427 CHECK_STACK_OVF (1);
8428 EMIT_NEW_PCONST (cfg, ins, NULL);
8429 ins->type = STACK_OBJ;
8434 CHECK_STACK_OVF (1);
8435 EMIT_NEW_ICONST (cfg, ins, -1);
8448 CHECK_STACK_OVF (1);
8449 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8455 CHECK_STACK_OVF (1);
8457 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8463 CHECK_STACK_OVF (1);
8464 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8470 CHECK_STACK_OVF (1);
8471 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8472 ins->type = STACK_I8;
8473 ins->dreg = alloc_dreg (cfg, STACK_I8);
8475 ins->inst_l = (gint64)read64 (ip);
8476 MONO_ADD_INS (bblock, ins);
8482 gboolean use_aotconst = FALSE;
8484 #ifdef TARGET_POWERPC
8485 /* FIXME: Clean this up */
8486 if (cfg->compile_aot)
8487 use_aotconst = TRUE;
8490 /* FIXME: we should really allocate this only late in the compilation process */
8491 f = mono_domain_alloc (cfg->domain, sizeof (float));
8493 CHECK_STACK_OVF (1);
8499 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8501 dreg = alloc_freg (cfg);
8502 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8503 ins->type = cfg->r4_stack_type;
8505 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8506 ins->type = cfg->r4_stack_type;
8507 ins->dreg = alloc_dreg (cfg, STACK_R8);
8509 MONO_ADD_INS (bblock, ins);
8519 gboolean use_aotconst = FALSE;
8521 #ifdef TARGET_POWERPC
8522 /* FIXME: Clean this up */
8523 if (cfg->compile_aot)
8524 use_aotconst = TRUE;
8527 /* FIXME: we should really allocate this only late in the compilation process */
8528 d = mono_domain_alloc (cfg->domain, sizeof (double));
8530 CHECK_STACK_OVF (1);
8536 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8538 dreg = alloc_freg (cfg);
8539 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8540 ins->type = STACK_R8;
8542 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8543 ins->type = STACK_R8;
8544 ins->dreg = alloc_dreg (cfg, STACK_R8);
8546 MONO_ADD_INS (bblock, ins);
8555 MonoInst *temp, *store;
8557 CHECK_STACK_OVF (1);
8561 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8562 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8564 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8567 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8580 if (sp [0]->type == STACK_R8)
8581 /* we need to pop the value from the x86 FP stack */
8582 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8588 INLINE_FAILURE ("jmp");
8589 GSHAREDVT_FAILURE (*ip);
8592 if (stack_start != sp)
8594 token = read32 (ip + 1);
8595 /* FIXME: check the signature matches */
8596 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8598 if (!cmethod || mono_loader_get_last_error ())
8601 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8602 GENERIC_SHARING_FAILURE (CEE_JMP);
8604 if (mono_security_cas_enabled ())
8605 CHECK_CFG_EXCEPTION;
8607 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8609 if (ARCH_HAVE_OP_TAIL_CALL) {
8610 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8613 /* Handle tail calls similarly to calls */
8614 n = fsig->param_count + fsig->hasthis;
8618 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8619 call->method = cmethod;
8620 call->tail_call = TRUE;
8621 call->signature = mono_method_signature (cmethod);
8622 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8623 call->inst.inst_p0 = cmethod;
8624 for (i = 0; i < n; ++i)
8625 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8627 mono_arch_emit_call (cfg, call);
8628 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8629 MONO_ADD_INS (bblock, (MonoInst*)call);
8631 for (i = 0; i < num_args; ++i)
8632 /* Prevent arguments from being optimized away */
8633 arg_array [i]->flags |= MONO_INST_VOLATILE;
8635 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8636 ins = (MonoInst*)call;
8637 ins->inst_p0 = cmethod;
8638 MONO_ADD_INS (bblock, ins);
8642 start_new_bblock = 1;
8647 case CEE_CALLVIRT: {
8648 MonoInst *addr = NULL;
8649 MonoMethodSignature *fsig = NULL;
8651 int virtual = *ip == CEE_CALLVIRT;
8652 int calli = *ip == CEE_CALLI;
8653 gboolean pass_imt_from_rgctx = FALSE;
8654 MonoInst *imt_arg = NULL;
8655 MonoInst *keep_this_alive = NULL;
8656 gboolean pass_vtable = FALSE;
8657 gboolean pass_mrgctx = FALSE;
8658 MonoInst *vtable_arg = NULL;
8659 gboolean check_this = FALSE;
8660 gboolean supported_tail_call = FALSE;
8661 gboolean tail_call = FALSE;
8662 gboolean need_seq_point = FALSE;
8663 guint32 call_opcode = *ip;
8664 gboolean emit_widen = TRUE;
8665 gboolean push_res = TRUE;
8666 gboolean skip_ret = FALSE;
8667 gboolean delegate_invoke = FALSE;
8670 token = read32 (ip + 1);
8675 //GSHAREDVT_FAILURE (*ip);
8680 fsig = mini_get_signature (method, token, generic_context);
8681 n = fsig->param_count + fsig->hasthis;
8683 if (method->dynamic && fsig->pinvoke) {
8687 * This is a call through a function pointer using a pinvoke
8688 * signature. Have to create a wrapper and call that instead.
8689 * FIXME: This is very slow, need to create a wrapper at JIT time
8690 * instead based on the signature.
8692 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8693 EMIT_NEW_PCONST (cfg, args [1], fsig);
8695 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8698 MonoMethod *cil_method;
8700 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8701 cil_method = cmethod;
8703 if (constrained_call) {
8704 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8705 if (cfg->verbose_level > 2)
8706 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8707 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8708 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8709 cfg->generic_sharing_context)) {
8710 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8714 if (cfg->verbose_level > 2)
8715 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8717 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8719 * This is needed since get_method_constrained can't find
8720 * the method in klass representing a type var.
8721 * The type var is guaranteed to be a reference type in this
8724 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8725 g_assert (!cmethod->klass->valuetype);
8727 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8733 if (!cmethod || mono_loader_get_last_error ())
8735 if (!dont_verify && !cfg->skip_visibility) {
8736 MonoMethod *target_method = cil_method;
8737 if (method->is_inflated) {
8738 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8740 if (!mono_method_can_access_method (method_definition, target_method) &&
8741 !mono_method_can_access_method (method, cil_method))
8742 METHOD_ACCESS_FAILURE (method, cil_method);
8745 if (mono_security_core_clr_enabled ())
8746 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8748 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8749 /* MS.NET seems to silently convert this to a callvirt */
8754 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8755 * converts to a callvirt.
8757 * tests/bug-515884.il is an example of this behavior
8759 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8760 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8761 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8765 if (!cmethod->klass->inited)
8766 if (!mono_class_init (cmethod->klass))
8767 TYPE_LOAD_ERROR (cmethod->klass);
8769 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8770 mini_class_is_system_array (cmethod->klass)) {
8771 array_rank = cmethod->klass->rank;
8772 fsig = mono_method_signature (cmethod);
8774 fsig = mono_method_signature (cmethod);
8779 if (fsig->pinvoke) {
8780 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8781 check_for_pending_exc, cfg->compile_aot);
8782 fsig = mono_method_signature (wrapper);
8783 } else if (constrained_call) {
8784 fsig = mono_method_signature (cmethod);
8786 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8791 mono_save_token_info (cfg, image, token, cil_method);
8793 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8794 need_seq_point = TRUE;
8796 n = fsig->param_count + fsig->hasthis;
8798 /* Don't support calls made using type arguments for now */
8800 if (cfg->gsharedvt) {
8801 if (mini_is_gsharedvt_signature (cfg, fsig))
8802 GSHAREDVT_FAILURE (*ip);
8806 if (mono_security_cas_enabled ()) {
8807 if (check_linkdemand (cfg, method, cmethod))
8808 INLINE_FAILURE ("linkdemand");
8809 CHECK_CFG_EXCEPTION;
8812 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8813 g_assert_not_reached ();
8816 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8819 if (!cfg->generic_sharing_context && cmethod)
8820 g_assert (!mono_method_check_context_used (cmethod));
8824 //g_assert (!virtual || fsig->hasthis);
8828 if (constrained_call) {
8829 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8830 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8831 /* The 'Own method' case below */
8832 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8833 /* 'The type parameter is instantiated as a reference type' case below. */
8835 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_call, &emit_widen, &bblock);
8836 CHECK_CFG_EXCEPTION;
8843 * We have the `constrained.' prefix opcode.
8845 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8847 * The type parameter is instantiated as a valuetype,
8848 * but that type doesn't override the method we're
8849 * calling, so we need to box `this'.
8851 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8852 ins->klass = constrained_call;
8853 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8854 CHECK_CFG_EXCEPTION;
8855 } else if (!constrained_call->valuetype) {
8856 int dreg = alloc_ireg_ref (cfg);
8859 * The type parameter is instantiated as a reference
8860 * type. We have a managed pointer on the stack, so
8861 * we need to dereference it here.
8863 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8864 ins->type = STACK_OBJ;
8867 if (cmethod->klass->valuetype) {
8870 /* Interface method */
8873 mono_class_setup_vtable (constrained_call);
8874 CHECK_TYPELOAD (constrained_call);
8875 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8877 TYPE_LOAD_ERROR (constrained_call);
8878 slot = mono_method_get_vtable_slot (cmethod);
8880 TYPE_LOAD_ERROR (cmethod->klass);
8881 cmethod = constrained_call->vtable [ioffset + slot];
8883 if (cmethod->klass == mono_defaults.enum_class) {
8884 /* Enum implements some interfaces, so treat this as the first case */
8885 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8886 ins->klass = constrained_call;
8887 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8888 CHECK_CFG_EXCEPTION;
8893 constrained_call = NULL;
8896 if (!calli && check_call_signature (cfg, fsig, sp))
8899 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8900 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8901 delegate_invoke = TRUE;
8904 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8906 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8907 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8915 * If the callee is a shared method, then its static cctor
8916 * might not get called after the call was patched.
8918 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8919 emit_generic_class_init (cfg, cmethod->klass);
8920 CHECK_TYPELOAD (cmethod->klass);
8924 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8926 if (cfg->generic_sharing_context && cmethod) {
8927 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8929 context_used = mini_method_check_context_used (cfg, cmethod);
8931 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8932 /* Generic method interface
8933 calls are resolved via a
8934 helper function and don't
8936 if (!cmethod_context || !cmethod_context->method_inst)
8937 pass_imt_from_rgctx = TRUE;
8941 * If a shared method calls another
8942 * shared method then the caller must
8943 * have a generic sharing context
8944 * because the magic trampoline
8945 * requires it. FIXME: We shouldn't
8946 * have to force the vtable/mrgctx
8947 * variable here. Instead there
8948 * should be a flag in the cfg to
8949 * request a generic sharing context.
8952 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8953 mono_get_vtable_var (cfg);
8958 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8960 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8962 CHECK_TYPELOAD (cmethod->klass);
8963 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8968 g_assert (!vtable_arg);
8970 if (!cfg->compile_aot) {
8972 * emit_get_rgctx_method () calls mono_class_vtable () so check
8973 * for type load errors before.
8975 mono_class_setup_vtable (cmethod->klass);
8976 CHECK_TYPELOAD (cmethod->klass);
8979 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8981 /* !marshalbyref is needed to properly handle generic methods + remoting */
8982 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8983 MONO_METHOD_IS_FINAL (cmethod)) &&
8984 !mono_class_is_marshalbyref (cmethod->klass)) {
8991 if (pass_imt_from_rgctx) {
8992 g_assert (!pass_vtable);
8995 imt_arg = emit_get_rgctx_method (cfg, context_used,
8996 cmethod, MONO_RGCTX_INFO_METHOD);
9000 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9002 /* Calling virtual generic methods */
9003 if (cmethod && virtual &&
9004 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9005 !(MONO_METHOD_IS_FINAL (cmethod) &&
9006 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9007 fsig->generic_param_count &&
9008 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9009 MonoInst *this_temp, *this_arg_temp, *store;
9010 MonoInst *iargs [4];
9011 gboolean use_imt = FALSE;
9013 g_assert (fsig->is_inflated);
9015 /* Prevent inlining of methods that contain indirect calls */
9016 INLINE_FAILURE ("virtual generic call");
9018 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9019 GSHAREDVT_FAILURE (*ip);
9021 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9022 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9027 g_assert (!imt_arg);
9029 g_assert (cmethod->is_inflated);
9030 imt_arg = emit_get_rgctx_method (cfg, context_used,
9031 cmethod, MONO_RGCTX_INFO_METHOD);
9032 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9034 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9035 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9036 MONO_ADD_INS (bblock, store);
9038 /* FIXME: This should be a managed pointer */
9039 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9041 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9042 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9043 cmethod, MONO_RGCTX_INFO_METHOD);
9044 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9045 addr = mono_emit_jit_icall (cfg,
9046 mono_helper_compile_generic_method, iargs);
9048 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9050 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9057 * Implement a workaround for the inherent races involved in locking:
9063 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9064 * try block, the Exit () won't be executed, see:
9065 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9066 * To work around this, we extend such try blocks to include the last x bytes
9067 * of the Monitor.Enter () call.
9069 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9070 MonoBasicBlock *tbb;
9072 GET_BBLOCK (cfg, tbb, ip + 5);
9074 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9075 * from Monitor.Enter like ArgumentNullException.
9077 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9078 /* Mark this bblock as needing to be extended */
9079 tbb->extend_try_block = TRUE;
9083 /* Conversion to a JIT intrinsic */
9084 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9086 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9087 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9094 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
9095 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9096 mono_method_check_inlining (cfg, cmethod)) {
9098 gboolean always = FALSE;
9100 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9101 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9102 /* Prevent inlining of methods that call wrappers */
9103 INLINE_FAILURE ("wrapper call");
9104 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9108 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9110 cfg->real_offset += 5;
9112 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9113 /* *sp is already set by inline_method */
9118 inline_costs += costs;
9124 /* Tail recursion elimination */
9125 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9126 gboolean has_vtargs = FALSE;
9129 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9130 INLINE_FAILURE ("tail call");
9132 /* keep it simple */
9133 for (i = fsig->param_count - 1; i >= 0; i--) {
9134 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9139 for (i = 0; i < n; ++i)
9140 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9141 MONO_INST_NEW (cfg, ins, OP_BR);
9142 MONO_ADD_INS (bblock, ins);
9143 tblock = start_bblock->out_bb [0];
9144 link_bblock (cfg, bblock, tblock);
9145 ins->inst_target_bb = tblock;
9146 start_new_bblock = 1;
9148 /* skip the CEE_RET, too */
9149 if (ip_in_bb (cfg, bblock, ip + 5))
9156 inline_costs += 10 * num_calls++;
9159 * Making generic calls out of gsharedvt methods.
9160 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9161 * patching gshared method addresses into a gsharedvt method.
9163 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
9164 MonoRgctxInfoType info_type;
9167 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9168 //GSHAREDVT_FAILURE (*ip);
9169 // disable for possible remoting calls
9170 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9171 GSHAREDVT_FAILURE (*ip);
9172 if (fsig->generic_param_count) {
9173 /* virtual generic call */
9174 g_assert (mono_use_imt);
9175 g_assert (!imt_arg);
9176 /* Same as the virtual generic case above */
9177 imt_arg = emit_get_rgctx_method (cfg, context_used,
9178 cmethod, MONO_RGCTX_INFO_METHOD);
9179 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9181 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9182 /* This can happen when we call a fully instantiated iface method */
9183 imt_arg = emit_get_rgctx_method (cfg, context_used,
9184 cmethod, MONO_RGCTX_INFO_METHOD);
9189 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
9190 /* test_0_multi_dim_arrays () in gshared.cs */
9191 GSHAREDVT_FAILURE (*ip);
9193 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9194 keep_this_alive = sp [0];
9196 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9197 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9199 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9200 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9202 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9204 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9206 * We pass the address to the gsharedvt trampoline in the rgctx reg
9208 MonoInst *callee = addr;
9210 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9212 GSHAREDVT_FAILURE (*ip);
9214 addr = emit_get_rgctx_sig (cfg, context_used,
9215 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9216 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9220 /* Generic sharing */
9223 * Use this if the callee is gsharedvt sharable too, since
9224 * at runtime we might find an instantiation so the call cannot
9225 * be patched (the 'no_patch' code path in mini-trampolines.c).
9227 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9228 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9229 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9230 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9231 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9232 INLINE_FAILURE ("gshared");
9234 g_assert (cfg->generic_sharing_context && cmethod);
9238 * We are compiling a call to a
9239 * generic method from shared code,
9240 * which means that we have to look up
9241 * the method in the rgctx and do an
9245 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9247 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9248 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9252 /* Indirect calls */
9254 if (call_opcode == CEE_CALL)
9255 g_assert (context_used);
9256 else if (call_opcode == CEE_CALLI)
9257 g_assert (!vtable_arg);
9259 /* FIXME: what the hell is this??? */
9260 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9261 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9263 /* Prevent inlining of methods with indirect calls */
9264 INLINE_FAILURE ("indirect call");
9266 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9271 * Instead of emitting an indirect call, emit a direct call
9272 * with the contents of the aotconst as the patch info.
9274 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9275 info_type = addr->inst_c1;
9276 info_data = addr->inst_p0;
9278 info_type = addr->inst_right->inst_c1;
9279 info_data = addr->inst_right->inst_left;
9282 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9283 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9288 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9296 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9297 MonoInst *val = sp [fsig->param_count];
9299 if (val->type == STACK_OBJ) {
9300 MonoInst *iargs [2];
9305 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9308 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9309 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9310 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9311 emit_write_barrier (cfg, addr, val);
9312 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9313 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9315 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9316 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9317 if (!cmethod->klass->element_class->valuetype && !readonly)
9318 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9319 CHECK_TYPELOAD (cmethod->klass);
9322 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9325 g_assert_not_reached ();
9332 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9336 /* Tail prefix / tail call optimization */
9338 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9339 /* FIXME: runtime generic context pointer for jumps? */
9340 /* FIXME: handle this for generic sharing eventually */
9341 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9342 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9343 supported_tail_call = TRUE;
9345 if (supported_tail_call) {
9348 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9349 INLINE_FAILURE ("tail call");
9351 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9353 if (ARCH_HAVE_OP_TAIL_CALL) {
9354 /* Handle tail calls similarly to normal calls */
9357 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9359 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9360 call->tail_call = TRUE;
9361 call->method = cmethod;
9362 call->signature = mono_method_signature (cmethod);
9365 * We implement tail calls by storing the actual arguments into the
9366 * argument variables, then emitting a CEE_JMP.
9368 for (i = 0; i < n; ++i) {
9369 /* Prevent argument from being register allocated */
9370 arg_array [i]->flags |= MONO_INST_VOLATILE;
9371 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9373 ins = (MonoInst*)call;
9374 ins->inst_p0 = cmethod;
9375 ins->inst_p1 = arg_array [0];
9376 MONO_ADD_INS (bblock, ins);
9377 link_bblock (cfg, bblock, end_bblock);
9378 start_new_bblock = 1;
9380 // FIXME: Eliminate unreachable epilogs
9383 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9384 * only reachable from this call.
9386 GET_BBLOCK (cfg, tblock, ip + 5);
9387 if (tblock == bblock || tblock->in_count == 0)
9396 * Synchronized wrappers.
9397 * It's hard to determine where to replace a method with its synchronized
9398 * wrapper without causing an infinite recursion. The current solution is
9399 * to add the synchronized wrapper in the trampolines, and to
9400 * change the called method to a dummy wrapper, and resolve that wrapper
9401 * to the real method in mono_jit_compile_method ().
9403 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9404 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9405 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9406 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9410 INLINE_FAILURE ("call");
9411 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9412 imt_arg, vtable_arg);
9415 link_bblock (cfg, bblock, end_bblock);
9416 start_new_bblock = 1;
9418 // FIXME: Eliminate unreachable epilogs
9421 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9422 * only reachable from this call.
9424 GET_BBLOCK (cfg, tblock, ip + 5);
9425 if (tblock == bblock || tblock->in_count == 0)
9432 /* End of call, INS should contain the result of the call, if any */
9434 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9437 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9442 if (keep_this_alive) {
9443 MonoInst *dummy_use;
9445 /* See mono_emit_method_call_full () */
9446 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9449 CHECK_CFG_EXCEPTION;
9453 g_assert (*ip == CEE_RET);
9457 constrained_call = NULL;
9459 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9463 if (cfg->method != method) {
9464 /* return from inlined method */
9466 * If in_count == 0, that means the ret is unreachable due to
9467 * being preceded by a throw. In that case, inline_method () will
9468 * handle setting the return value
9469 * (test case: test_0_inline_throw ()).
9471 if (return_var && cfg->cbb->in_count) {
9472 MonoType *ret_type = mono_method_signature (method)->ret;
9478 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9481 //g_assert (returnvar != -1);
9482 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9483 cfg->ret_var_set = TRUE;
9486 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9488 if (cfg->lmf_var && cfg->cbb->in_count)
9492 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
9494 if (seq_points && !sym_seq_points) {
9496 * Place a seq point here too even though the IL stack is not
9497 * empty, so a step over on
9500 * will work correctly.
9502 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9503 MONO_ADD_INS (cfg->cbb, ins);
9506 g_assert (!return_var);
9510 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9513 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9516 if (!cfg->vret_addr) {
9519 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9521 EMIT_NEW_RETLOADA (cfg, ret_addr);
9523 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9524 ins->klass = mono_class_from_mono_type (ret_type);
9527 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9528 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9529 MonoInst *iargs [1];
9533 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9534 mono_arch_emit_setret (cfg, method, conv);
9536 mono_arch_emit_setret (cfg, method, *sp);
9539 mono_arch_emit_setret (cfg, method, *sp);
9544 if (sp != stack_start)
9546 MONO_INST_NEW (cfg, ins, OP_BR);
9548 ins->inst_target_bb = end_bblock;
9549 MONO_ADD_INS (bblock, ins);
9550 link_bblock (cfg, bblock, end_bblock);
9551 start_new_bblock = 1;
9555 MONO_INST_NEW (cfg, ins, OP_BR);
9557 target = ip + 1 + (signed char)(*ip);
9559 GET_BBLOCK (cfg, tblock, target);
9560 link_bblock (cfg, bblock, tblock);
9561 ins->inst_target_bb = tblock;
9562 if (sp != stack_start) {
9563 handle_stack_args (cfg, stack_start, sp - stack_start);
9565 CHECK_UNVERIFIABLE (cfg);
9567 MONO_ADD_INS (bblock, ins);
9568 start_new_bblock = 1;
9569 inline_costs += BRANCH_COST;
9583 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9585 target = ip + 1 + *(signed char*)ip;
9591 inline_costs += BRANCH_COST;
9595 MONO_INST_NEW (cfg, ins, OP_BR);
9598 target = ip + 4 + (gint32)read32(ip);
9600 GET_BBLOCK (cfg, tblock, target);
9601 link_bblock (cfg, bblock, tblock);
9602 ins->inst_target_bb = tblock;
9603 if (sp != stack_start) {
9604 handle_stack_args (cfg, stack_start, sp - stack_start);
9606 CHECK_UNVERIFIABLE (cfg);
9609 MONO_ADD_INS (bblock, ins);
9611 start_new_bblock = 1;
9612 inline_costs += BRANCH_COST;
9619 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9620 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9621 guint32 opsize = is_short ? 1 : 4;
9623 CHECK_OPSIZE (opsize);
9625 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9628 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9633 GET_BBLOCK (cfg, tblock, target);
9634 link_bblock (cfg, bblock, tblock);
9635 GET_BBLOCK (cfg, tblock, ip);
9636 link_bblock (cfg, bblock, tblock);
9638 if (sp != stack_start) {
9639 handle_stack_args (cfg, stack_start, sp - stack_start);
9640 CHECK_UNVERIFIABLE (cfg);
9643 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9644 cmp->sreg1 = sp [0]->dreg;
9645 type_from_op (cfg, cmp, sp [0], NULL);
9648 #if SIZEOF_REGISTER == 4
9649 if (cmp->opcode == OP_LCOMPARE_IMM) {
9650 /* Convert it to OP_LCOMPARE */
9651 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9652 ins->type = STACK_I8;
9653 ins->dreg = alloc_dreg (cfg, STACK_I8);
9655 MONO_ADD_INS (bblock, ins);
9656 cmp->opcode = OP_LCOMPARE;
9657 cmp->sreg2 = ins->dreg;
9660 MONO_ADD_INS (bblock, cmp);
9662 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9663 type_from_op (cfg, ins, sp [0], NULL);
9664 MONO_ADD_INS (bblock, ins);
9665 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9666 GET_BBLOCK (cfg, tblock, target);
9667 ins->inst_true_bb = tblock;
9668 GET_BBLOCK (cfg, tblock, ip);
9669 ins->inst_false_bb = tblock;
9670 start_new_bblock = 2;
9673 inline_costs += BRANCH_COST;
9688 MONO_INST_NEW (cfg, ins, *ip);
9690 target = ip + 4 + (gint32)read32(ip);
9696 inline_costs += BRANCH_COST;
9700 MonoBasicBlock **targets;
9701 MonoBasicBlock *default_bblock;
9702 MonoJumpInfoBBTable *table;
9703 int offset_reg = alloc_preg (cfg);
9704 int target_reg = alloc_preg (cfg);
9705 int table_reg = alloc_preg (cfg);
9706 int sum_reg = alloc_preg (cfg);
9707 gboolean use_op_switch;
9711 n = read32 (ip + 1);
9714 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9718 CHECK_OPSIZE (n * sizeof (guint32));
9719 target = ip + n * sizeof (guint32);
9721 GET_BBLOCK (cfg, default_bblock, target);
9722 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9724 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9725 for (i = 0; i < n; ++i) {
9726 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9727 targets [i] = tblock;
9728 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9732 if (sp != stack_start) {
9734 * Link the current bb with the targets as well, so handle_stack_args
9735 * will set their in_stack correctly.
9737 link_bblock (cfg, bblock, default_bblock);
9738 for (i = 0; i < n; ++i)
9739 link_bblock (cfg, bblock, targets [i]);
9741 handle_stack_args (cfg, stack_start, sp - stack_start);
9743 CHECK_UNVERIFIABLE (cfg);
9746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9747 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9750 for (i = 0; i < n; ++i)
9751 link_bblock (cfg, bblock, targets [i]);
9753 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9754 table->table = targets;
9755 table->table_size = n;
9757 use_op_switch = FALSE;
9759 /* ARM implements SWITCH statements differently */
9760 /* FIXME: Make it use the generic implementation */
9761 if (!cfg->compile_aot)
9762 use_op_switch = TRUE;
9765 if (COMPILE_LLVM (cfg))
9766 use_op_switch = TRUE;
9768 cfg->cbb->has_jump_table = 1;
9770 if (use_op_switch) {
9771 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9772 ins->sreg1 = src1->dreg;
9773 ins->inst_p0 = table;
9774 ins->inst_many_bb = targets;
9775 ins->klass = GUINT_TO_POINTER (n);
9776 MONO_ADD_INS (cfg->cbb, ins);
9778 if (sizeof (gpointer) == 8)
9779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9781 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9783 #if SIZEOF_REGISTER == 8
9784 /* The upper word might not be zero, and we add it to a 64 bit address later */
9785 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9788 if (cfg->compile_aot) {
9789 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9791 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9792 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9793 ins->inst_p0 = table;
9794 ins->dreg = table_reg;
9795 MONO_ADD_INS (cfg->cbb, ins);
9798 /* FIXME: Use load_memindex */
9799 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9801 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9803 start_new_bblock = 1;
9804 inline_costs += (BRANCH_COST * 2);
9824 dreg = alloc_freg (cfg);
9827 dreg = alloc_lreg (cfg);
9830 dreg = alloc_ireg_ref (cfg);
9833 dreg = alloc_preg (cfg);
9836 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9837 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9838 if (*ip == CEE_LDIND_R4)
9839 ins->type = cfg->r4_stack_type;
9840 ins->flags |= ins_flag;
9841 MONO_ADD_INS (bblock, ins);
9843 if (ins_flag & MONO_INST_VOLATILE) {
9844 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9845 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9861 if (ins_flag & MONO_INST_VOLATILE) {
9862 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9863 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9866 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9867 ins->flags |= ins_flag;
9870 MONO_ADD_INS (bblock, ins);
9872 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9873 emit_write_barrier (cfg, sp [0], sp [1]);
9882 MONO_INST_NEW (cfg, ins, (*ip));
9884 ins->sreg1 = sp [0]->dreg;
9885 ins->sreg2 = sp [1]->dreg;
9886 type_from_op (cfg, ins, sp [0], sp [1]);
9888 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9890 /* Use the immediate opcodes if possible */
9891 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9892 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9893 if (imm_opcode != -1) {
9894 ins->opcode = imm_opcode;
9895 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9898 NULLIFY_INS (sp [1]);
9902 MONO_ADD_INS ((cfg)->cbb, (ins));
9904 *sp++ = mono_decompose_opcode (cfg, ins);
9921 MONO_INST_NEW (cfg, ins, (*ip));
9923 ins->sreg1 = sp [0]->dreg;
9924 ins->sreg2 = sp [1]->dreg;
9925 type_from_op (cfg, ins, sp [0], sp [1]);
9927 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9928 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9930 /* FIXME: Pass opcode to is_inst_imm */
9932 /* Use the immediate opcodes if possible */
9933 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9936 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9937 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9938 /* Keep emulated opcodes which are optimized away later */
9939 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9940 imm_opcode = mono_op_to_op_imm (ins->opcode);
9943 if (imm_opcode != -1) {
9944 ins->opcode = imm_opcode;
9945 if (sp [1]->opcode == OP_I8CONST) {
9946 #if SIZEOF_REGISTER == 8
9947 ins->inst_imm = sp [1]->inst_l;
9949 ins->inst_ls_word = sp [1]->inst_ls_word;
9950 ins->inst_ms_word = sp [1]->inst_ms_word;
9954 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9957 /* Might be followed by an instruction added by add_widen_op */
9958 if (sp [1]->next == NULL)
9959 NULLIFY_INS (sp [1]);
9962 MONO_ADD_INS ((cfg)->cbb, (ins));
9964 *sp++ = mono_decompose_opcode (cfg, ins);
9977 case CEE_CONV_OVF_I8:
9978 case CEE_CONV_OVF_U8:
9982 /* Special case this earlier so we have long constants in the IR */
9983 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9984 int data = sp [-1]->inst_c0;
9985 sp [-1]->opcode = OP_I8CONST;
9986 sp [-1]->type = STACK_I8;
9987 #if SIZEOF_REGISTER == 8
9988 if ((*ip) == CEE_CONV_U8)
9989 sp [-1]->inst_c0 = (guint32)data;
9991 sp [-1]->inst_c0 = data;
9993 sp [-1]->inst_ls_word = data;
9994 if ((*ip) == CEE_CONV_U8)
9995 sp [-1]->inst_ms_word = 0;
9997 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9999 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10006 case CEE_CONV_OVF_I4:
10007 case CEE_CONV_OVF_I1:
10008 case CEE_CONV_OVF_I2:
10009 case CEE_CONV_OVF_I:
10010 case CEE_CONV_OVF_U:
10013 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10014 ADD_UNOP (CEE_CONV_OVF_I8);
10021 case CEE_CONV_OVF_U1:
10022 case CEE_CONV_OVF_U2:
10023 case CEE_CONV_OVF_U4:
10026 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10027 ADD_UNOP (CEE_CONV_OVF_U8);
10034 case CEE_CONV_OVF_I1_UN:
10035 case CEE_CONV_OVF_I2_UN:
10036 case CEE_CONV_OVF_I4_UN:
10037 case CEE_CONV_OVF_I8_UN:
10038 case CEE_CONV_OVF_U1_UN:
10039 case CEE_CONV_OVF_U2_UN:
10040 case CEE_CONV_OVF_U4_UN:
10041 case CEE_CONV_OVF_U8_UN:
10042 case CEE_CONV_OVF_I_UN:
10043 case CEE_CONV_OVF_U_UN:
10050 CHECK_CFG_EXCEPTION;
10054 case CEE_ADD_OVF_UN:
10056 case CEE_MUL_OVF_UN:
10058 case CEE_SUB_OVF_UN:
10064 GSHAREDVT_FAILURE (*ip);
10067 token = read32 (ip + 1);
10068 klass = mini_get_class (method, token, generic_context);
10069 CHECK_TYPELOAD (klass);
10071 if (generic_class_is_reference_type (cfg, klass)) {
10072 MonoInst *store, *load;
10073 int dreg = alloc_ireg_ref (cfg);
10075 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10076 load->flags |= ins_flag;
10077 MONO_ADD_INS (cfg->cbb, load);
10079 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10080 store->flags |= ins_flag;
10081 MONO_ADD_INS (cfg->cbb, store);
10083 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10084 emit_write_barrier (cfg, sp [0], sp [1]);
10086 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10092 int loc_index = -1;
10098 token = read32 (ip + 1);
10099 klass = mini_get_class (method, token, generic_context);
10100 CHECK_TYPELOAD (klass);
10102 /* Optimize the common ldobj+stloc combination */
10105 loc_index = ip [6];
10112 loc_index = ip [5] - CEE_STLOC_0;
10119 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10120 CHECK_LOCAL (loc_index);
10122 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10123 ins->dreg = cfg->locals [loc_index]->dreg;
10124 ins->flags |= ins_flag;
10127 if (ins_flag & MONO_INST_VOLATILE) {
10128 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10129 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10135 /* Optimize the ldobj+stobj combination */
10136 /* The reference case ends up being a load+store anyway */
10137 /* Skip this if the operation is volatile. */
10138 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10143 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10150 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10151 ins->flags |= ins_flag;
10154 if (ins_flag & MONO_INST_VOLATILE) {
10155 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10156 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10165 CHECK_STACK_OVF (1);
10167 n = read32 (ip + 1);
10169 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10170 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10171 ins->type = STACK_OBJ;
10174 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10175 MonoInst *iargs [1];
10176 char *str = mono_method_get_wrapper_data (method, n);
10178 if (cfg->compile_aot)
10179 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10181 EMIT_NEW_PCONST (cfg, iargs [0], str);
10182 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10184 if (cfg->opt & MONO_OPT_SHARED) {
10185 MonoInst *iargs [3];
10187 if (cfg->compile_aot) {
10188 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10190 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10191 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10192 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10193 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10194 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10196 if (bblock->out_of_line) {
10197 MonoInst *iargs [2];
10199 if (image == mono_defaults.corlib) {
10201 * Avoid relocations in AOT and save some space by using a
10202 * version of helper_ldstr specialized to mscorlib.
10204 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10205 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10207 /* Avoid creating the string object */
10208 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10209 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10210 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10214 if (cfg->compile_aot) {
10215 NEW_LDSTRCONST (cfg, ins, image, n);
10217 MONO_ADD_INS (bblock, ins);
10220 NEW_PCONST (cfg, ins, NULL);
10221 ins->type = STACK_OBJ;
10222 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10224 OUT_OF_MEMORY_FAILURE;
10227 MONO_ADD_INS (bblock, ins);
10236 MonoInst *iargs [2];
10237 MonoMethodSignature *fsig;
10240 MonoInst *vtable_arg = NULL;
10243 token = read32 (ip + 1);
10244 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10245 if (!cmethod || mono_loader_get_last_error ())
10247 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10250 mono_save_token_info (cfg, image, token, cmethod);
10252 if (!mono_class_init (cmethod->klass))
10253 TYPE_LOAD_ERROR (cmethod->klass);
10255 context_used = mini_method_check_context_used (cfg, cmethod);
10257 if (mono_security_cas_enabled ()) {
10258 if (check_linkdemand (cfg, method, cmethod))
10259 INLINE_FAILURE ("linkdemand");
10260 CHECK_CFG_EXCEPTION;
10261 } else if (mono_security_core_clr_enabled ()) {
10262 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10265 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10266 emit_generic_class_init (cfg, cmethod->klass);
10267 CHECK_TYPELOAD (cmethod->klass);
10271 if (cfg->gsharedvt) {
10272 if (mini_is_gsharedvt_variable_signature (sig))
10273 GSHAREDVT_FAILURE (*ip);
10277 n = fsig->param_count;
10281 * Generate smaller code for the common newobj <exception> instruction in
10282 * argument checking code.
10284 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10285 is_exception_class (cmethod->klass) && n <= 2 &&
10286 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10287 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10288 MonoInst *iargs [3];
10292 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10295 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10298 iargs [1] = sp [0];
10299 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10302 iargs [1] = sp [0];
10303 iargs [2] = sp [1];
10304 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10307 g_assert_not_reached ();
10315 /* move the args to allow room for 'this' in the first position */
10321 /* check_call_signature () requires sp[0] to be set */
10322 this_ins.type = STACK_OBJ;
10323 sp [0] = &this_ins;
10324 if (check_call_signature (cfg, fsig, sp))
10329 if (mini_class_is_system_array (cmethod->klass)) {
10330 *sp = emit_get_rgctx_method (cfg, context_used,
10331 cmethod, MONO_RGCTX_INFO_METHOD);
10333 /* Avoid varargs in the common case */
10334 if (fsig->param_count == 1)
10335 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10336 else if (fsig->param_count == 2)
10337 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10338 else if (fsig->param_count == 3)
10339 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10340 else if (fsig->param_count == 4)
10341 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10343 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10344 } else if (cmethod->string_ctor) {
10345 g_assert (!context_used);
10346 g_assert (!vtable_arg);
10347 /* we simply pass a null pointer */
10348 EMIT_NEW_PCONST (cfg, *sp, NULL);
10349 /* now call the string ctor */
10350 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10352 if (cmethod->klass->valuetype) {
10353 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10354 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10355 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10360 * The code generated by mini_emit_virtual_call () expects
10361 * iargs [0] to be a boxed instance, but luckily the vcall
10362 * will be transformed into a normal call there.
10364 } else if (context_used) {
10365 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10368 MonoVTable *vtable = NULL;
10370 if (!cfg->compile_aot)
10371 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10372 CHECK_TYPELOAD (cmethod->klass);
10375 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10376 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10377 * As a workaround, we call class cctors before allocating objects.
10379 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10380 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10381 if (cfg->verbose_level > 2)
10382 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10383 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10386 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10389 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10392 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10394 /* Now call the actual ctor */
10395 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10396 CHECK_CFG_EXCEPTION;
10399 if (alloc == NULL) {
10401 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10402 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10410 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10411 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10414 case CEE_CASTCLASS:
10418 token = read32 (ip + 1);
10419 klass = mini_get_class (method, token, generic_context);
10420 CHECK_TYPELOAD (klass);
10421 if (sp [0]->type != STACK_OBJ)
10424 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10425 CHECK_CFG_EXCEPTION;
10434 token = read32 (ip + 1);
10435 klass = mini_get_class (method, token, generic_context);
10436 CHECK_TYPELOAD (klass);
10437 if (sp [0]->type != STACK_OBJ)
10440 context_used = mini_class_check_context_used (cfg, klass);
10442 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10443 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10444 MonoInst *args [3];
10450 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10453 if (cfg->compile_aot)
10454 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10456 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10458 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10461 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10462 MonoMethod *mono_isinst;
10463 MonoInst *iargs [1];
10466 mono_isinst = mono_marshal_get_isinst (klass);
10467 iargs [0] = sp [0];
10469 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10470 iargs, ip, cfg->real_offset, TRUE, &bblock);
10471 CHECK_CFG_EXCEPTION;
10472 g_assert (costs > 0);
10475 cfg->real_offset += 5;
10479 inline_costs += costs;
10482 ins = handle_isinst (cfg, klass, *sp, context_used);
10483 CHECK_CFG_EXCEPTION;
10490 case CEE_UNBOX_ANY: {
10491 MonoInst *res, *addr;
10496 token = read32 (ip + 1);
10497 klass = mini_get_class (method, token, generic_context);
10498 CHECK_TYPELOAD (klass);
10500 mono_save_token_info (cfg, image, token, klass);
10502 context_used = mini_class_check_context_used (cfg, klass);
10504 if (mini_is_gsharedvt_klass (cfg, klass)) {
10505 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10507 } else if (generic_class_is_reference_type (cfg, klass)) {
10508 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10509 CHECK_CFG_EXCEPTION;
10510 } else if (mono_class_is_nullable (klass)) {
10511 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10513 addr = handle_unbox (cfg, klass, sp, context_used);
10515 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10526 MonoClass *enum_class;
10527 MonoMethod *has_flag;
10533 token = read32 (ip + 1);
10534 klass = mini_get_class (method, token, generic_context);
10535 CHECK_TYPELOAD (klass);
10537 mono_save_token_info (cfg, image, token, klass);
10539 context_used = mini_class_check_context_used (cfg, klass);
10541 if (generic_class_is_reference_type (cfg, klass)) {
10547 if (klass == mono_defaults.void_class)
10549 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10551 /* frequent check in generic code: box (struct), brtrue */
10556 * <push int/long ptr>
10559 * constrained. MyFlags
10560 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10562 * If we find this sequence and the operand types on box and constrained
10563 * are equal, we can emit a specialized instruction sequence instead of
10564 * the very slow HasFlag () call.
10566 if ((cfg->opt & MONO_OPT_INTRINS) &&
10567 /* Cheap checks first. */
10568 ip + 5 + 6 + 5 < end &&
10569 ip [5] == CEE_PREFIX1 &&
10570 ip [6] == CEE_CONSTRAINED_ &&
10571 ip [11] == CEE_CALLVIRT &&
10572 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10573 mono_class_is_enum (klass) &&
10574 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10575 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10576 has_flag->klass == mono_defaults.enum_class &&
10577 !strcmp (has_flag->name, "HasFlag") &&
10578 has_flag->signature->hasthis &&
10579 has_flag->signature->param_count == 1) {
10580 CHECK_TYPELOAD (enum_class);
10582 if (enum_class == klass) {
10583 MonoInst *enum_this, *enum_flag;
10588 enum_this = sp [0];
10589 enum_flag = sp [1];
10591 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10596 // FIXME: LLVM can't handle the inconsistent bb linking
10597 if (!mono_class_is_nullable (klass) &&
10598 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10599 (ip [5] == CEE_BRTRUE ||
10600 ip [5] == CEE_BRTRUE_S ||
10601 ip [5] == CEE_BRFALSE ||
10602 ip [5] == CEE_BRFALSE_S)) {
10603 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10605 MonoBasicBlock *true_bb, *false_bb;
10609 if (cfg->verbose_level > 3) {
10610 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10611 printf ("<box+brtrue opt>\n");
10616 case CEE_BRFALSE_S:
10619 target = ip + 1 + (signed char)(*ip);
10626 target = ip + 4 + (gint)(read32 (ip));
10630 g_assert_not_reached ();
10634 * We need to link both bblocks, since it is needed for handling stack
10635 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10636 * Branching to only one of them would lead to inconsistencies, so
10637 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10639 GET_BBLOCK (cfg, true_bb, target);
10640 GET_BBLOCK (cfg, false_bb, ip);
10642 mono_link_bblock (cfg, cfg->cbb, true_bb);
10643 mono_link_bblock (cfg, cfg->cbb, false_bb);
10645 if (sp != stack_start) {
10646 handle_stack_args (cfg, stack_start, sp - stack_start);
10648 CHECK_UNVERIFIABLE (cfg);
10651 if (COMPILE_LLVM (cfg)) {
10652 dreg = alloc_ireg (cfg);
10653 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10656 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10658 /* The JIT can't eliminate the iconst+compare */
10659 MONO_INST_NEW (cfg, ins, OP_BR);
10660 ins->inst_target_bb = is_true ? true_bb : false_bb;
10661 MONO_ADD_INS (cfg->cbb, ins);
10664 start_new_bblock = 1;
10668 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10670 CHECK_CFG_EXCEPTION;
10679 token = read32 (ip + 1);
10680 klass = mini_get_class (method, token, generic_context);
10681 CHECK_TYPELOAD (klass);
10683 mono_save_token_info (cfg, image, token, klass);
10685 context_used = mini_class_check_context_used (cfg, klass);
10687 if (mono_class_is_nullable (klass)) {
10690 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10691 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10695 ins = handle_unbox (cfg, klass, sp, context_used);
10708 MonoClassField *field;
10709 #ifndef DISABLE_REMOTING
10713 gboolean is_instance;
10715 gpointer addr = NULL;
10716 gboolean is_special_static;
10718 MonoInst *store_val = NULL;
10719 MonoInst *thread_ins;
10722 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10724 if (op == CEE_STFLD) {
10727 store_val = sp [1];
10732 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10734 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10737 if (op == CEE_STSFLD) {
10740 store_val = sp [0];
10745 token = read32 (ip + 1);
10746 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10747 field = mono_method_get_wrapper_data (method, token);
10748 klass = field->parent;
10751 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10754 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10755 FIELD_ACCESS_FAILURE (method, field);
10756 mono_class_init (klass);
10758 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10761 /* if the class is Critical then transparent code cannot access its fields */
10762 if (!is_instance && mono_security_core_clr_enabled ())
10763 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10765 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10766 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10767 if (mono_security_core_clr_enabled ())
10768 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10772 * LDFLD etc. is usable on static fields as well, so convert those cases to
10775 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10787 g_assert_not_reached ();
10789 is_instance = FALSE;
10792 context_used = mini_class_check_context_used (cfg, klass);
10794 /* INSTANCE CASE */
10796 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10797 if (op == CEE_STFLD) {
10798 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10800 #ifndef DISABLE_REMOTING
10801 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10802 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10803 MonoInst *iargs [5];
10805 GSHAREDVT_FAILURE (op);
10807 iargs [0] = sp [0];
10808 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10809 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10810 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10812 iargs [4] = sp [1];
10814 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10815 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10816 iargs, ip, cfg->real_offset, TRUE, &bblock);
10817 CHECK_CFG_EXCEPTION;
10818 g_assert (costs > 0);
10820 cfg->real_offset += 5;
10822 inline_costs += costs;
10824 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10831 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10833 if (mini_is_gsharedvt_klass (cfg, klass)) {
10834 MonoInst *offset_ins;
10836 context_used = mini_class_check_context_used (cfg, klass);
10838 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10839 dreg = alloc_ireg_mp (cfg);
10840 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10841 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10842 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10844 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10846 if (sp [0]->opcode != OP_LDADDR)
10847 store->flags |= MONO_INST_FAULT;
10849 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10850 /* insert call to write barrier */
10854 dreg = alloc_ireg_mp (cfg);
10855 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10856 emit_write_barrier (cfg, ptr, sp [1]);
10859 store->flags |= ins_flag;
10866 #ifndef DISABLE_REMOTING
10867 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10868 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10869 MonoInst *iargs [4];
10871 GSHAREDVT_FAILURE (op);
10873 iargs [0] = sp [0];
10874 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10875 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10876 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10877 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10878 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10879 iargs, ip, cfg->real_offset, TRUE, &bblock);
10880 CHECK_CFG_EXCEPTION;
10881 g_assert (costs > 0);
10883 cfg->real_offset += 5;
10887 inline_costs += costs;
10889 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10895 if (sp [0]->type == STACK_VTYPE) {
10898 /* Have to compute the address of the variable */
10900 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10902 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10904 g_assert (var->klass == klass);
10906 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10910 if (op == CEE_LDFLDA) {
10911 if (is_magic_tls_access (field)) {
10912 GSHAREDVT_FAILURE (*ip);
10914 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10916 if (sp [0]->type == STACK_OBJ) {
10917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10918 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10921 dreg = alloc_ireg_mp (cfg);
10923 if (mini_is_gsharedvt_klass (cfg, klass)) {
10924 MonoInst *offset_ins;
10926 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10927 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10929 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10931 ins->klass = mono_class_from_mono_type (field->type);
10932 ins->type = STACK_MP;
10938 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10940 if (mini_is_gsharedvt_klass (cfg, klass)) {
10941 MonoInst *offset_ins;
10943 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10944 dreg = alloc_ireg_mp (cfg);
10945 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10946 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10948 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10950 load->flags |= ins_flag;
10951 if (sp [0]->opcode != OP_LDADDR)
10952 load->flags |= MONO_INST_FAULT;
10966 * We can only support shared generic static
10967 * field access on architectures where the
10968 * trampoline code has been extended to handle
10969 * the generic class init.
10971 #ifndef MONO_ARCH_VTABLE_REG
10972 GENERIC_SHARING_FAILURE (op);
10975 context_used = mini_class_check_context_used (cfg, klass);
10977 ftype = mono_field_get_type (field);
10979 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10982 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10983 * to be called here.
10985 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10986 mono_class_vtable (cfg->domain, klass);
10987 CHECK_TYPELOAD (klass);
10989 mono_domain_lock (cfg->domain);
10990 if (cfg->domain->special_static_fields)
10991 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10992 mono_domain_unlock (cfg->domain);
10994 is_special_static = mono_class_field_is_special_static (field);
10996 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10997 thread_ins = mono_get_thread_intrinsic (cfg);
11001 /* Generate IR to compute the field address */
11002 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11004 * Fast access to TLS data
11005 * Inline version of get_thread_static_data () in
11009 int idx, static_data_reg, array_reg, dreg;
11011 GSHAREDVT_FAILURE (op);
11013 // offset &= 0x7fffffff;
11014 // idx = (offset >> 24) - 1;
11015 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11016 MONO_ADD_INS (cfg->cbb, thread_ins);
11017 static_data_reg = alloc_ireg (cfg);
11018 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11020 if (cfg->compile_aot) {
11021 int offset_reg, offset2_reg, idx_reg;
11023 /* For TLS variables, this will return the TLS offset */
11024 EMIT_NEW_SFLDACONST (cfg, ins, field);
11025 offset_reg = ins->dreg;
11026 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11027 idx_reg = alloc_ireg (cfg);
11028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11031 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11032 array_reg = alloc_ireg (cfg);
11033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11034 offset2_reg = alloc_ireg (cfg);
11035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11036 dreg = alloc_ireg (cfg);
11037 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11039 offset = (gsize)addr & 0x7fffffff;
11040 idx = (offset >> 24) - 1;
11042 array_reg = alloc_ireg (cfg);
11043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11044 dreg = alloc_ireg (cfg);
11045 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11047 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11048 (cfg->compile_aot && is_special_static) ||
11049 (context_used && is_special_static)) {
11050 MonoInst *iargs [2];
11052 g_assert (field->parent);
11053 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11054 if (context_used) {
11055 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11056 field, MONO_RGCTX_INFO_CLASS_FIELD);
11058 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11060 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11061 } else if (context_used) {
11062 MonoInst *static_data;
11065 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11066 method->klass->name_space, method->klass->name, method->name,
11067 depth, field->offset);
11070 if (mono_class_needs_cctor_run (klass, method))
11071 emit_generic_class_init (cfg, klass);
11074 * The pointer we're computing here is
11076 * super_info.static_data + field->offset
11078 static_data = emit_get_rgctx_klass (cfg, context_used,
11079 klass, MONO_RGCTX_INFO_STATIC_DATA);
11081 if (mini_is_gsharedvt_klass (cfg, klass)) {
11082 MonoInst *offset_ins;
11084 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11085 dreg = alloc_ireg_mp (cfg);
11086 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11087 } else if (field->offset == 0) {
11090 int addr_reg = mono_alloc_preg (cfg);
11091 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11093 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11094 MonoInst *iargs [2];
11096 g_assert (field->parent);
11097 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11098 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11099 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11101 MonoVTable *vtable = NULL;
11103 if (!cfg->compile_aot)
11104 vtable = mono_class_vtable (cfg->domain, klass);
11105 CHECK_TYPELOAD (klass);
11108 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11109 if (!(g_slist_find (class_inits, klass))) {
11110 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11111 if (cfg->verbose_level > 2)
11112 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11113 class_inits = g_slist_prepend (class_inits, klass);
11116 if (cfg->run_cctors) {
11118 /* This makes it so that inlining cannot trigger */
11119 /* .cctors: too many apps depend on them */
11120 /* running with a specific order... */
11122 if (! vtable->initialized)
11123 INLINE_FAILURE ("class init");
11124 ex = mono_runtime_class_init_full (vtable, FALSE);
11126 set_exception_object (cfg, ex);
11127 goto exception_exit;
11131 if (cfg->compile_aot)
11132 EMIT_NEW_SFLDACONST (cfg, ins, field);
11135 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11137 EMIT_NEW_PCONST (cfg, ins, addr);
11140 MonoInst *iargs [1];
11141 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11142 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11146 /* Generate IR to do the actual load/store operation */
11148 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11149 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11150 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11153 if (op == CEE_LDSFLDA) {
11154 ins->klass = mono_class_from_mono_type (ftype);
11155 ins->type = STACK_PTR;
11157 } else if (op == CEE_STSFLD) {
11160 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11161 store->flags |= ins_flag;
11163 gboolean is_const = FALSE;
11164 MonoVTable *vtable = NULL;
11165 gpointer addr = NULL;
11167 if (!context_used) {
11168 vtable = mono_class_vtable (cfg->domain, klass);
11169 CHECK_TYPELOAD (klass);
11171 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11172 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11173 int ro_type = ftype->type;
11175 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11176 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11177 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11180 GSHAREDVT_FAILURE (op);
11182 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11185 case MONO_TYPE_BOOLEAN:
11187 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11191 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11194 case MONO_TYPE_CHAR:
11196 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11200 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11205 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11209 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11214 case MONO_TYPE_PTR:
11215 case MONO_TYPE_FNPTR:
11216 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11217 type_to_eval_stack_type ((cfg), field->type, *sp);
11220 case MONO_TYPE_STRING:
11221 case MONO_TYPE_OBJECT:
11222 case MONO_TYPE_CLASS:
11223 case MONO_TYPE_SZARRAY:
11224 case MONO_TYPE_ARRAY:
11225 if (!mono_gc_is_moving ()) {
11226 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11227 type_to_eval_stack_type ((cfg), field->type, *sp);
11235 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11240 case MONO_TYPE_VALUETYPE:
11250 CHECK_STACK_OVF (1);
11252 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11253 load->flags |= ins_flag;
11259 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11260 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11261 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/*
 * Object/value store (presumably CEE_STOBJ — case label elided): resolve the
 * type token, honor volatile release semantics, emit the store, and insert a
 * GC write barrier when the stored type may contain references.
 */
11272 token = read32 (ip + 1);
11273 klass = mini_get_class (method, token, generic_context);
11274 CHECK_TYPELOAD (klass);
11275 if (ins_flag & MONO_INST_VOLATILE) {
11276 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11277 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11279 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
/* sp [0] = destination address, sp [1] = value. */
11280 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11281 ins->flags |= ins_flag;
11282 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11283 generic_class_is_reference_type (cfg, klass)) {
11284 /* insert call to write barrier */
11285 emit_write_barrier (cfg, sp [0], sp [1]);
/*
 * CEE_NEWARR: allocate a one-dimensional array of 'klass' with the length
 * popped from the stack. Handles generic sharing (rgctx vtable lookup),
 * MONO_OPT_SHARED (icall with explicit domain) and the normal OP_NEWARR
 * path; then optionally open-codes the InitializeArray data copy.
 */
11297 const char *data_ptr;
11299 guint32 field_token;
11305 token = read32 (ip + 1);
11307 klass = mini_get_class (method, token, generic_context);
11308 CHECK_TYPELOAD (klass);
11310 context_used = mini_class_check_context_used (cfg, klass);
/* A 64-bit length must be narrowed to u4 with overflow checking. */
11312 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11313 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11314 ins->sreg1 = sp [0]->dreg;
11315 ins->type = STACK_I4;
11316 ins->dreg = alloc_ireg (cfg);
11317 MONO_ADD_INS (cfg->cbb, ins);
11318 *sp = mono_decompose_opcode (cfg, ins);
/* Generic sharing: fetch the array vtable through the rgctx at run time. */
11321 if (context_used) {
11322 MonoInst *args [3];
11323 MonoClass *array_class = mono_array_class_get (klass, 1);
11324 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11326 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11329 args [0] = emit_get_rgctx_klass (cfg, context_used,
11330 array_class, MONO_RGCTX_INFO_VTABLE);
11335 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11337 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11339 if (cfg->opt & MONO_OPT_SHARED) {
11340 /* Decompose now to avoid problems with references to the domainvar */
11341 MonoInst *iargs [3];
11343 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11344 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11345 iargs [2] = sp [0];
11347 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11349 /* Decompose later since it is needed by abcrem */
11350 MonoClass *array_type = mono_array_class_get (klass, 1);
/* Force vtable creation now so the later decomposition cannot fail. */
11351 mono_class_vtable (cfg->domain, array_type);
11352 CHECK_TYPELOAD (array_type);
11354 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11355 ins->dreg = alloc_ireg_ref (cfg);
11356 ins->sreg1 = sp [0]->dreg;
11357 ins->inst_newa_class = klass;
11358 ins->type = STACK_OBJ;
11359 ins->klass = array_type;
11360 MONO_ADD_INS (cfg->cbb, ins);
11361 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11362 cfg->cbb->has_array_access = TRUE;
11364 /* Needed so mono_emit_load_get_addr () gets called */
11365 mono_get_got_var (cfg);
11375 * we inline/optimize the initialization sequence if possible.
11376 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11377 * for small sizes open code the memcpy
11378 * ensure the rva field is big enough
/*
 * InitializeArray intrinsic: if the following IL is the well-known
 * "ldtoken field; call InitializeArray" pattern with a constant length,
 * replace it with a direct memcpy from the field's RVA data.
 */
11380 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11381 MonoMethod *memcpy_method = get_memcpy_method ();
11382 MonoInst *iargs [3];
11383 int add_reg = alloc_ireg_mp (cfg);
11385 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11386 if (cfg->compile_aot) {
11387 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11389 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11391 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11392 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * CEE_LDLEN: load the array length; the OP_LDLEN carries MONO_INST_FAULT so
 * the decomposed form keeps the implicit null check.
 * Then CEE_LDELEMA: compute the address of an array element, with an array
 * type check for reference element types outside wrappers.
 */
11401 if (sp [0]->type != STACK_OBJ)
11404 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11405 ins->dreg = alloc_preg (cfg);
11406 ins->sreg1 = sp [0]->dreg;
11407 ins->type = STACK_I4;
11408 /* This flag will be inherited by the decomposition */
11409 ins->flags |= MONO_INST_FAULT;
11410 MONO_ADD_INS (cfg->cbb, ins);
11411 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11412 cfg->cbb->has_array_access = TRUE;
/* --- CEE_LDELEMA (case label elided in this excerpt) --- */
11420 if (sp [0]->type != STACK_OBJ)
11423 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11425 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11426 CHECK_TYPELOAD (klass);
11427 /* we need to make sure that this array is exactly the type it needs
11428 * to be for correctness. the wrappers are lax with their usage
11429 * so we need to ignore them here
11431 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11432 MonoClass *array_class = mono_array_class_get (klass, 1);
11433 mini_emit_check_array_type (cfg, sp [0], array_class);
11434 CHECK_TYPELOAD (array_class);
/* TRUE = emit a bounds check as part of the address computation. */
11438 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/*
 * CEE_LDELEM* family: load an array element. The typed variants derive the
 * element class from the opcode; generic CEE_LDELEM reads a type token.
 * Three code paths: gsharedvt (address + LOADV), constant index (direct
 * offset load after a bounds check), and the general ldelema + load.
 */
11443 case CEE_LDELEM_I1:
11444 case CEE_LDELEM_U1:
11445 case CEE_LDELEM_I2:
11446 case CEE_LDELEM_U2:
11447 case CEE_LDELEM_I4:
11448 case CEE_LDELEM_U4:
11449 case CEE_LDELEM_I8:
11451 case CEE_LDELEM_R4:
11452 case CEE_LDELEM_R8:
11453 case CEE_LDELEM_REF: {
11459 if (*ip == CEE_LDELEM) {
11461 token = read32 (ip + 1);
11462 klass = mini_get_class (method, token, generic_context);
11463 CHECK_TYPELOAD (klass);
11464 mono_class_init (klass);
11467 klass = array_access_to_klass (*ip);
11469 if (sp [0]->type != STACK_OBJ)
11472 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11474 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11475 // FIXME-VT: OP_ICONST optimization
11476 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11478 ins->opcode = OP_LOADV_MEMBASE;
11479 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset and keep only the bounds check. */
11480 int array_reg = sp [0]->dreg;
11481 int index_reg = sp [1]->dreg;
11482 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11484 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11485 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11487 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11488 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
/* Generic CEE_LDELEM is 5 bytes (opcode + token); advance accordingly. */
11491 if (*ip == CEE_LDELEM)
/*
 * CEE_STELEM* family: store an array element; emit_array_store handles
 * bounds/type checks and write barriers.
 */
11498 case CEE_STELEM_I1:
11499 case CEE_STELEM_I2:
11500 case CEE_STELEM_I4:
11501 case CEE_STELEM_I8:
11502 case CEE_STELEM_R4:
11503 case CEE_STELEM_R8:
11504 case CEE_STELEM_REF:
11509 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11511 if (*ip == CEE_STELEM) {
11513 token = read32 (ip + 1);
11514 klass = mini_get_class (method, token, generic_context);
11515 CHECK_TYPELOAD (klass);
11516 mono_class_init (klass);
11519 klass = array_access_to_klass (*ip);
11521 if (sp [0]->type != STACK_OBJ)
11524 emit_array_store (cfg, klass, sp, TRUE);
11526 if (*ip == CEE_STELEM)
/*
 * CEE_CKFINITE: check that the top-of-stack float is finite (throws
 * ArithmeticException otherwise via the decomposed OP_CKFINITE).
 */
11533 case CEE_CKFINITE: {
11537 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11538 ins->sreg1 = sp [0]->dreg;
11539 ins->dreg = alloc_freg (cfg);
11540 ins->type = STACK_R8;
11541 MONO_ADD_INS (bblock, ins);
11543 *sp++ = mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the value address from a typed reference,
 * verifying its klass field matches the expected type (rgctx lookup in
 * shared generic code, static class check otherwise).
 */
11548 case CEE_REFANYVAL: {
11549 MonoInst *src_var, *src;
11551 int klass_reg = alloc_preg (cfg);
11552 int dreg = alloc_preg (cfg);
11554 GSHAREDVT_FAILURE (*ip);
11557 MONO_INST_NEW (cfg, ins, *ip);
11560 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11561 CHECK_TYPELOAD (klass);
11563 context_used = mini_class_check_context_used (cfg, klass);
/* Materialize the TypedReference in a variable so we can take its address. */
11566 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11568 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11569 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11572 if (context_used) {
11573 MonoInst *klass_ins;
11575 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11576 klass, MONO_RGCTX_INFO_KLASS);
/* Type mismatch in the typed reference -> InvalidCastException. */
11579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11580 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11582 mini_emit_class_check (cfg, klass_reg, klass);
11584 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11585 ins->type = STACK_MP;
/*
 * CEE_MKREFANY: build a TypedReference (klass, type, value) in a local from
 * the address on the stack. Three paths for filling klass/type: rgctx
 * lookup (shared generics), CLASSCONST (AOT), or immediate pointers (JIT).
 */
11590 case CEE_MKREFANY: {
11591 MonoInst *loc, *addr;
11593 GSHAREDVT_FAILURE (*ip);
11596 MONO_INST_NEW (cfg, ins, *ip);
11599 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11600 CHECK_TYPELOAD (klass);
11602 context_used = mini_class_check_context_used (cfg, klass);
11604 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11605 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11607 if (context_used) {
11608 MonoInst *const_ins;
11609 int type_reg = alloc_preg (cfg);
11611 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11612 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
/* The MonoType lives at a fixed offset inside MonoClass (byval_arg). */
11613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11614 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11615 } else if (cfg->compile_aot) {
11616 int const_reg = alloc_preg (cfg);
11617 int type_reg = alloc_preg (cfg);
11619 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11620 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11622 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
/* Plain JIT: klass/type are known pointers, store them as immediates. */
11624 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11625 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11629 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11630 ins->type = STACK_VTYPE;
11631 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a runtime handle (type/field/method handle) for the
 * given metadata token. Handles wrapper methods (token indexes into wrapper
 * data), generic sharing (determining context_used per handle kind), the
 * MONO_OPT_SHARED icall path, and the common
 * "ldtoken T; call Type.GetTypeFromHandle" fast path.
 */
11636 case CEE_LDTOKEN: {
11638 MonoClass *handle_class;
11640 CHECK_STACK_OVF (1);
11643 n = read32 (ip + 1);
11645 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11646 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
/* In wrappers, the token is an index into method-local wrapper data. */
11647 handle = mono_method_get_wrapper_data (method, n);
11648 handle_class = mono_method_get_wrapper_data (method, n + 1);
11649 if (handle_class == mono_defaults.typehandle_class)
11650 handle = &((MonoClass*)handle)->byval_arg;
11653 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11658 mono_class_init (handle_class);
11659 if (cfg->generic_sharing_context) {
11660 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11661 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11662 /* This case handles ldtoken
11663 of an open type, like for
/* Determine which generic context (if any) the handle depends on. */
11666 } else if (handle_class == mono_defaults.typehandle_class) {
11667 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11668 } else if (handle_class == mono_defaults.fieldhandle_class)
11669 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11670 else if (handle_class == mono_defaults.methodhandle_class)
11671 context_used = mini_method_check_context_used (cfg, handle);
11673 g_assert_not_reached ();
/* Shared code: resolve the token at run time through a JIT icall. */
11676 if ((cfg->opt & MONO_OPT_SHARED) &&
11677 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11678 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11679 MonoInst *addr, *vtvar, *iargs [3];
11680 int method_context_used;
11682 method_context_used = mini_method_check_context_used (cfg, method);
11684 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11686 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11687 EMIT_NEW_ICONST (cfg, iargs [1], n);
11688 if (method_context_used) {
11689 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11690 method, MONO_RGCTX_INFO_METHOD);
11691 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11693 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11694 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11696 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11698 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11700 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/*
 * Fast path: ldtoken immediately followed by a call to
 * Type.GetTypeFromHandle — push the System.Type object directly and let
 * the caller skip the call.
 */
11702 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11703 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11704 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11705 (cmethod->klass == mono_defaults.systemtype_class) &&
11706 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11707 MonoClass *tclass = mono_class_from_mono_type (handle);
11709 mono_class_init (tclass);
11710 if (context_used) {
11711 ins = emit_get_rgctx_klass (cfg, context_used,
11712 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11713 } else if (cfg->compile_aot) {
11714 if (method->wrapper_type) {
11715 mono_error_init (&error); //got to do it since there are multiple conditionals below
11716 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11717 /* Special case for static synchronized wrappers */
11718 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11720 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11721 /* FIXME: n is not a normal token */
11723 EMIT_NEW_PCONST (cfg, ins, NULL);
11726 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11729 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11731 ins->type = STACK_OBJ;
11732 ins->klass = cmethod->klass;
/* Generic path: store the handle into a valuetype temp and load it back. */
11735 MonoInst *addr, *vtvar;
11737 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11739 if (context_used) {
11740 if (handle_class == mono_defaults.typehandle_class) {
11741 ins = emit_get_rgctx_klass (cfg, context_used,
11742 mono_class_from_mono_type (handle),
11743 MONO_RGCTX_INFO_TYPE);
11744 } else if (handle_class == mono_defaults.methodhandle_class) {
11745 ins = emit_get_rgctx_method (cfg, context_used,
11746 handle, MONO_RGCTX_INFO_METHOD);
11747 } else if (handle_class == mono_defaults.fieldhandle_class) {
11748 ins = emit_get_rgctx_field (cfg, context_used,
11749 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11751 g_assert_not_reached ();
11753 } else if (cfg->compile_aot) {
11754 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11756 EMIT_NEW_PCONST (cfg, ins, handle);
11758 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11760 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/*
 * CEE_THROW tail (case label elided): throw the exception on top of the
 * stack; code after a throw is unreachable, so the bblock is marked
 * out-of-line and terminated.
 */
11770 MONO_INST_NEW (cfg, ins, OP_THROW);
11772 ins->sreg1 = sp [0]->dreg;
11774 bblock->out_of_line = TRUE;
11775 MONO_ADD_INS (bblock, ins);
11776 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11777 MONO_ADD_INS (bblock, ins);
11780 link_bblock (cfg, bblock, end_bblock);
11781 start_new_bblock = 1;
11783 case CEE_ENDFINALLY:
11784 /* mono_save_seq_point_info () depends on this */
11785 if (sp != stack_start)
11786 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11787 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11788 MONO_ADD_INS (bblock, ins);
11790 start_new_bblock = 1;
11793 * Control will leave the method so empty the stack, otherwise
11794 * the next basic block will start with a nonempty stack.
11796 while (sp != stack_start) {
/*
 * CEE_LEAVE / CEE_LEAVE_S: branch out of a protected region. Empties the
 * evaluation stack, rethrows pending thread-abort exceptions when leaving
 * a catch handler, emits OP_CALL_HANDLER for each enclosing finally, then
 * branches to the target.
 */
11801 case CEE_LEAVE_S: {
11804 if (*ip == CEE_LEAVE) {
11806 target = ip + 5 + (gint32)read32(ip + 1);
11809 target = ip + 2 + (signed char)(ip [1]);
11812 /* empty the stack */
11813 while (sp != stack_start) {
11818 * If this leave statement is in a catch block, check for a
11819 * pending exception, and rethrow it if necessary.
11820 * We avoid doing this in runtime invoke wrappers, since those are called
11821 * by native code which excepts the wrapper to catch all exceptions.
11823 for (i = 0; i < header->num_clauses; ++i) {
11824 MonoExceptionClause *clause = &header->clauses [i];
11827 * Use <= in the final comparison to handle clauses with multiple
11828 * leave statements, like in bug #78024.
11829 * The ordering of the exception clauses guarantees that we find the
11830 * innermost clause.
11832 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11834 MonoBasicBlock *dont_throw;
11839 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11842 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11844 NEW_BBLOCK (cfg, dont_throw);
11847 * Currently, we always rethrow the abort exception, despite the
11848 * fact that this is not correct. See thread6.cs for an example.
11849 * But propagating the abort exception is more important than
11850 * getting the sematics right.
11852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11853 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11854 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11856 MONO_START_BB (cfg, dont_throw);
/* Call every finally handler between here and the leave target. */
11861 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11863 MonoExceptionClause *clause;
11865 for (tmp = handlers; tmp; tmp = tmp->next) {
11866 clause = tmp->data;
11867 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11869 link_bblock (cfg, bblock, tblock);
11870 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11871 ins->inst_target_bb = tblock;
11872 ins->inst_eh_block = clause;
11873 MONO_ADD_INS (bblock, ins);
11874 bblock->has_call_handler = 1;
11875 if (COMPILE_LLVM (cfg)) {
11876 MonoBasicBlock *target_bb;
11879 * Link the finally bblock with the target, since it will
11880 * conceptually branch there.
11881 * FIXME: Have to link the bblock containing the endfinally.
11883 GET_BBLOCK (cfg, target_bb, target);
11884 link_bblock (cfg, tblock, target_bb);
11887 g_list_free (handlers);
11890 MONO_INST_NEW (cfg, ins, OP_BR);
11891 MONO_ADD_INS (bblock, ins);
11892 GET_BBLOCK (cfg, tblock, target);
11893 link_bblock (cfg, bblock, tblock);
11894 ins->inst_target_bb = tblock;
11895 start_new_bblock = 1;
11897 if (*ip == CEE_LEAVE)
11906 * Mono specific opcodes
/*
 * MONO_CUSTOM_PREFIX: internal opcodes emitted only by runtime-generated
 * wrappers (never by a C# compiler), hence the wrapper_type assertion.
 */
11908 case MONO_CUSTOM_PREFIX: {
11910 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
/* CEE_MONO_ICALL: call a JIT icall whose address is stored in wrapper data. */
11914 case CEE_MONO_ICALL: {
11916 MonoJitICallInfo *info;
11918 token = read32 (ip + 2);
11919 func = mono_method_get_wrapper_data (method, token);
11920 info = mono_find_jit_icall_by_addr (func);
11922 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11925 CHECK_STACK (info->sig->param_count);
11926 sp -= info->sig->param_count;
11928 ins = mono_emit_jit_icall (cfg, info->func, sp);
11929 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11933 inline_costs += 10 * num_calls++;
/* CEE_MONO_LDPTR: push a raw pointer taken from the wrapper data. */
11937 case CEE_MONO_LDPTR: {
11940 CHECK_STACK_OVF (1);
11942 token = read32 (ip + 2);
11944 ptr = mono_method_get_wrapper_data (method, token);
11945 /* FIXME: Generalize this */
11946 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11947 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11952 EMIT_NEW_PCONST (cfg, ins, ptr);
11955 inline_costs += 10 * num_calls++;
11956 /* Can't embed random pointers into AOT code */
/* CEE_MONO_JIT_ICALL_ADDR: push the address of a named JIT icall. */
11960 case CEE_MONO_JIT_ICALL_ADDR: {
11961 MonoJitICallInfo *callinfo;
11964 CHECK_STACK_OVF (1);
11966 token = read32 (ip + 2);
11968 ptr = mono_method_get_wrapper_data (method, token);
11969 callinfo = mono_find_jit_icall_by_addr (ptr);
11970 g_assert (callinfo);
11971 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11974 inline_costs += 10 * num_calls++;
/* CEE_MONO_ICALL_ADDR: push the native address of an internal call method. */
11977 case CEE_MONO_ICALL_ADDR: {
11978 MonoMethod *cmethod;
11981 CHECK_STACK_OVF (1);
11983 token = read32 (ip + 2);
11985 cmethod = mono_method_get_wrapper_data (method, token);
11987 if (cfg->compile_aot) {
11988 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11990 ptr = mono_lookup_internal_call (cmethod);
11992 EMIT_NEW_PCONST (cfg, ins, ptr);
/* CEE_MONO_VTADDR: push the address of the valuetype on top of the stack. */
11998 case CEE_MONO_VTADDR: {
11999 MonoInst *src_var, *src;
12005 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12006 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
/* CEE_MONO_NEWOBJ: allocate an object of a wrapper-supplied class. */
12011 case CEE_MONO_NEWOBJ: {
12012 MonoInst *iargs [2];
12014 CHECK_STACK_OVF (1);
12016 token = read32 (ip + 2);
12017 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12018 mono_class_init (klass);
12019 NEW_DOMAINCONST (cfg, iargs [0]);
12020 MONO_ADD_INS (cfg->cbb, iargs [0]);
12021 NEW_CLASSCONST (cfg, iargs [1], klass);
12022 MONO_ADD_INS (cfg->cbb, iargs [1]);
12023 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12025 inline_costs += 10 * num_calls++;
/* CEE_MONO_OBJADDR: reinterpret an object reference as a managed pointer. */
12028 case CEE_MONO_OBJADDR:
12031 MONO_INST_NEW (cfg, ins, OP_MOVE);
12032 ins->dreg = alloc_ireg_mp (cfg);
12033 ins->sreg1 = sp [0]->dreg;
12034 ins->type = STACK_MP;
12035 MONO_ADD_INS (cfg->cbb, ins);
12039 case CEE_MONO_LDNATIVEOBJ:
12041 * Similar to LDOBJ, but instead load the unmanaged
12042 * representation of the vtype to the stack.
12047 token = read32 (ip + 2);
12048 klass = mono_method_get_wrapper_data (method, token);
12049 g_assert (klass->valuetype);
12050 mono_class_init (klass);
12053 MonoInst *src, *dest, *temp;
/* Marshal the native layout into a pinvoke-shaped temporary. */
12056 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12057 temp->backend.is_pinvoke = 1;
12058 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12059 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12061 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12062 dest->type = STACK_VTYPE;
12063 dest->klass = klass;
12069 case CEE_MONO_RETOBJ: {
12071 * Same as RET, but return the native representation of a vtype
12074 g_assert (cfg->ret);
12075 g_assert (mono_method_signature (method)->pinvoke);
12080 token = read32 (ip + 2);
12081 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12083 if (!cfg->vret_addr) {
12084 g_assert (cfg->ret_var_is_local);
12086 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12088 EMIT_NEW_RETLOADA (cfg, ins);
12090 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12092 if (sp != stack_start)
/* Terminate the method: branch to the exit bblock. */
12095 MONO_INST_NEW (cfg, ins, OP_BR);
12096 ins->inst_target_bb = end_bblock;
12097 MONO_ADD_INS (bblock, ins);
12098 link_bblock (cfg, bblock, end_bblock);
12099 start_new_bblock = 1;
/* Cheap isinst/castclass variants used inside wrappers. */
12103 case CEE_MONO_CISINST:
12104 case CEE_MONO_CCASTCLASS: {
12109 token = read32 (ip + 2);
12110 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12111 if (ip [1] == CEE_MONO_CISINST)
12112 ins = handle_cisinst (cfg, klass, sp [0]);
12114 ins = handle_ccastclass (cfg, klass, sp [0]);
12120 case CEE_MONO_SAVE_LMF:
12121 case CEE_MONO_RESTORE_LMF:
12122 #ifdef MONO_ARCH_HAVE_LMF_OPS
12123 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12124 MONO_ADD_INS (bblock, ins);
12125 cfg->need_lmf_area = TRUE;
12129 case CEE_MONO_CLASSCONST:
12130 CHECK_STACK_OVF (1);
12132 token = read32 (ip + 2);
12133 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12136 inline_costs += 10 * num_calls++;
/* Hint: the following code is a cold path. */
12138 case CEE_MONO_NOT_TAKEN:
12139 bblock->out_of_line = TRUE;
/* CEE_MONO_TLS: read a runtime TLS slot identified by a small key. */
12142 case CEE_MONO_TLS: {
12145 CHECK_STACK_OVF (1);
12147 key = (gint32)read32 (ip + 2);
12148 g_assert (key < TLS_KEY_NUM);
12150 ins = mono_create_tls_get (cfg, key);
12152 if (cfg->compile_aot) {
/* Backends with TLS support patch this opcode at AOT load time. */
12154 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12155 ins->dreg = alloc_preg (cfg);
12156 ins->type = STACK_PTR;
12158 g_assert_not_reached ();
12161 ins->type = STACK_PTR;
12162 MONO_ADD_INS (bblock, ins);
12167 case CEE_MONO_DYN_CALL: {
12168 MonoCallInst *call;
12170 /* It would be easier to call a trampoline, but that would put an
12171 * extra frame on the stack, confusing exception handling. So
12172 * implement it inline using an opcode for now.
12175 if (!cfg->dyn_call_var) {
12176 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12177 /* prevent it from being register allocated */
12178 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12181 /* Has to use a call inst since it local regalloc expects it */
12182 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12183 ins = (MonoInst*)call;
12185 ins->sreg1 = sp [0]->dreg;
12186 ins->sreg2 = sp [1]->dreg;
12187 MONO_ADD_INS (bblock, ins);
12189 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12192 inline_costs += 10 * num_calls++;
12196 case CEE_MONO_MEMORY_BARRIER: {
12198 emit_memory_barrier (cfg, (int)read32 (ip + 2));
/*
 * CEE_MONO_JIT_ATTACH: ensure the current thread is attached to the
 * runtime/domain. Fast path: if TLS intrinsics are available, skip the
 * attach icall when the thread is already attached to the right domain.
 */
12202 case CEE_MONO_JIT_ATTACH: {
12203 MonoInst *args [16], *domain_ins;
12204 MonoInst *ad_ins, *jit_tls_ins;
12205 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12207 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12209 EMIT_NEW_PCONST (cfg, ins, NULL);
12210 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12212 ad_ins = mono_get_domain_intrinsic (cfg);
12213 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12215 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12216 NEW_BBLOCK (cfg, next_bb);
12217 NEW_BBLOCK (cfg, call_bb);
12219 if (cfg->compile_aot) {
12220 /* AOT code is only used in the root domain */
12221 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12223 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12225 MONO_ADD_INS (cfg->cbb, ad_ins);
12226 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12229 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12234 MONO_START_BB (cfg, call_bb);
12237 if (cfg->compile_aot) {
12238 /* AOT code is only used in the root domain */
12239 EMIT_NEW_PCONST (cfg, args [0], NULL);
12241 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12243 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
/* Remember the previous domain so JIT_DETACH can restore it. */
12244 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12247 MONO_START_BB (cfg, next_bb);
12253 case CEE_MONO_JIT_DETACH: {
12254 MonoInst *args [16];
12256 /* Restore the original domain */
12257 dreg = alloc_ireg (cfg);
12258 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12259 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12264 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
/*
 * CEE_PREFIX1 (0xFE): two-byte opcodes. First CEE_ARGLIST, then the
 * comparison opcodes CEQ/CGT/CGT_UN/CLT/CLT_UN which push 0/1.
 */
12270 case CEE_PREFIX1: {
12273 case CEE_ARGLIST: {
12274 /* somewhat similar to LDTOKEN */
12275 MonoInst *addr, *vtvar;
12276 CHECK_STACK_OVF (1);
12277 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12279 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12280 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12282 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12283 ins->type = STACK_VTYPE;
12284 ins->klass = mono_defaults.argumenthandle_class;
12294 MonoInst *cmp, *arg1, *arg2;
12302 * The following transforms:
12303 * CEE_CEQ into OP_CEQ
12304 * CEE_CGT into OP_CGT
12305 * CEE_CGT_UN into OP_CGT_UN
12306 * CEE_CLT into OP_CLT
12307 * CEE_CLT_UN into OP_CLT_UN
/* The OP_* set-condition opcodes are laid out parallel to the CEE_* ones. */
12309 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12311 MONO_INST_NEW (cfg, ins, cmp->opcode);
12312 cmp->sreg1 = arg1->dreg;
12313 cmp->sreg2 = arg2->dreg;
12314 type_from_op (cfg, cmp, arg1, arg2);
12316 add_widen_op (cfg, cmp, &arg1, &arg2);
/* Pick the compare opcode width/kind from the operand stack types. */
12317 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12318 cmp->opcode = OP_LCOMPARE;
12319 else if (arg1->type == STACK_R4)
12320 cmp->opcode = OP_RCOMPARE;
12321 else if (arg1->type == STACK_R8)
12322 cmp->opcode = OP_FCOMPARE;
12324 cmp->opcode = OP_ICOMPARE;
12325 MONO_ADD_INS (bblock, cmp);
12326 ins->type = STACK_I4;
12327 ins->dreg = alloc_dreg (cfg, ins->type);
12328 type_from_op (cfg, ins, arg1, arg2);
12330 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12332 * The backends expect the fceq opcodes to do the
/* Float compares: the set-cond opcode itself carries the source regs. */
12335 ins->sreg1 = cmp->sreg1;
12336 ins->sreg2 = cmp->sreg2;
12339 MONO_ADD_INS (bblock, ins);
/*
 * CEE_LDFTN (case label elided): push a native function pointer for the
 * given method. Performs access/security checks, then tries the common
 * ldftn+newobj delegate-creation optimization before falling back to the
 * mono_ldftn icall.
 */
12345 MonoInst *argconst;
12346 MonoMethod *cil_method;
12348 CHECK_STACK_OVF (1);
12350 n = read32 (ip + 2);
12351 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12352 if (!cmethod || mono_loader_get_last_error ())
12354 mono_class_init (cmethod->klass);
12356 mono_save_token_info (cfg, image, n, cmethod);
12358 context_used = mini_method_check_context_used (cfg, cmethod);
12360 cil_method = cmethod;
12361 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12362 METHOD_ACCESS_FAILURE (method, cil_method);
12364 if (mono_security_cas_enabled ()) {
12365 if (check_linkdemand (cfg, method, cmethod))
12366 INLINE_FAILURE ("linkdemand");
12367 CHECK_CFG_EXCEPTION;
12368 } else if (mono_security_core_clr_enabled ()) {
12369 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12373 * Optimize the common case of ldftn+delegate creation
/* ldftn (2+4 bytes) immediately followed by newobj of a delegate ctor. */
12375 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12376 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12377 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12378 MonoInst *target_ins, *handle_ins;
12379 MonoMethod *invoke;
12380 int invoke_context_used;
12382 invoke = mono_get_delegate_invoke (ctor_method->klass);
12383 if (!invoke || !mono_method_signature (invoke))
12386 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12388 target_ins = sp [-1];
12390 if (mono_security_core_clr_enabled ())
12391 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12393 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12394 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12395 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12397 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12401 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12402 /* FIXME: SGEN support */
12403 if (invoke_context_used == 0) {
12405 if (cfg->verbose_level > 3)
12406 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12407 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12410 CHECK_CFG_EXCEPTION;
/* Fallback: resolve the function pointer through the mono_ldftn icall. */
12421 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12422 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12426 inline_costs += 10 * num_calls++;
12429 case CEE_LDVIRTFTN: {
12430 MonoInst *args [2];
12434 n = read32 (ip + 2);
12435 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12436 if (!cmethod || mono_loader_get_last_error ())
12438 mono_class_init (cmethod->klass);
12440 context_used = mini_method_check_context_used (cfg, cmethod);
12442 if (mono_security_cas_enabled ()) {
12443 if (check_linkdemand (cfg, method, cmethod))
12444 INLINE_FAILURE ("linkdemand");
12445 CHECK_CFG_EXCEPTION;
12446 } else if (mono_security_core_clr_enabled ()) {
12447 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12451 * Optimize the common case of ldvirtftn+delegate creation
12453 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12454 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12455 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12456 MonoInst *target_ins, *handle_ins;
12457 MonoMethod *invoke;
12458 int invoke_context_used;
12460 invoke = mono_get_delegate_invoke (ctor_method->klass);
12461 if (!invoke || !mono_method_signature (invoke))
12464 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12466 target_ins = sp [-1];
12468 if (mono_security_core_clr_enabled ())
12469 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12471 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12472 /* FIXME: SGEN support */
12473 if (invoke_context_used == 0) {
12475 if (cfg->verbose_level > 3)
12476 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12477 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12480 CHECK_CFG_EXCEPTION;
12494 args [1] = emit_get_rgctx_method (cfg, context_used,
12495 cmethod, MONO_RGCTX_INFO_METHOD);
12498 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12500 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12503 inline_costs += 10 * num_calls++;
12507 CHECK_STACK_OVF (1);
12509 n = read16 (ip + 2);
12511 EMIT_NEW_ARGLOAD (cfg, ins, n);
12516 CHECK_STACK_OVF (1);
12518 n = read16 (ip + 2);
12520 NEW_ARGLOADA (cfg, ins, n);
12521 MONO_ADD_INS (cfg->cbb, ins);
12529 n = read16 (ip + 2);
12531 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12533 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12537 CHECK_STACK_OVF (1);
12539 n = read16 (ip + 2);
12541 EMIT_NEW_LOCLOAD (cfg, ins, n);
12546 unsigned char *tmp_ip;
12547 CHECK_STACK_OVF (1);
12549 n = read16 (ip + 2);
12552 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12558 EMIT_NEW_LOCLOADA (cfg, ins, n);
12567 n = read16 (ip + 2);
12569 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12571 emit_stloc_ir (cfg, sp, header, n);
12578 if (sp != stack_start)
12580 if (cfg->method != method)
12582 * Inlining this into a loop in a parent could lead to
12583 * stack overflows which is different behavior than the
12584 * non-inlined case, thus disable inlining in this case.
12586 INLINE_FAILURE("localloc");
12588 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12589 ins->dreg = alloc_preg (cfg);
12590 ins->sreg1 = sp [0]->dreg;
12591 ins->type = STACK_PTR;
12592 MONO_ADD_INS (cfg->cbb, ins);
12594 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12596 ins->flags |= MONO_INST_INIT;
12601 case CEE_ENDFILTER: {
12602 MonoExceptionClause *clause, *nearest;
12603 int cc, nearest_num;
12607 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12609 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12610 ins->sreg1 = (*sp)->dreg;
12611 MONO_ADD_INS (bblock, ins);
12612 start_new_bblock = 1;
12617 for (cc = 0; cc < header->num_clauses; ++cc) {
12618 clause = &header->clauses [cc];
12619 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12620 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12621 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12626 g_assert (nearest);
12627 if ((ip - header->code) != nearest->handler_offset)
12632 case CEE_UNALIGNED_:
12633 ins_flag |= MONO_INST_UNALIGNED;
12634 /* FIXME: record alignment? we can assume 1 for now */
12638 case CEE_VOLATILE_:
12639 ins_flag |= MONO_INST_VOLATILE;
12643 ins_flag |= MONO_INST_TAILCALL;
12644 cfg->flags |= MONO_CFG_HAS_TAIL;
12645 /* Can't inline tail calls at this time */
12646 inline_costs += 100000;
12653 token = read32 (ip + 2);
12654 klass = mini_get_class (method, token, generic_context);
12655 CHECK_TYPELOAD (klass);
12656 if (generic_class_is_reference_type (cfg, klass))
12657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12659 mini_emit_initobj (cfg, *sp, NULL, klass);
12663 case CEE_CONSTRAINED_:
12665 token = read32 (ip + 2);
12666 constrained_call = mini_get_class (method, token, generic_context);
12667 CHECK_TYPELOAD (constrained_call);
12671 case CEE_INITBLK: {
12672 MonoInst *iargs [3];
12676 /* Skip optimized paths for volatile operations. */
12677 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12678 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12679 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12680 /* emit_memset only works when val == 0 */
12681 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12684 iargs [0] = sp [0];
12685 iargs [1] = sp [1];
12686 iargs [2] = sp [2];
12687 if (ip [1] == CEE_CPBLK) {
12689 * FIXME: It's unclear whether we should be emitting both the acquire
12690 * and release barriers for cpblk. It is technically both a load and
12691 * store operation, so it seems like that's the sensible thing to do.
12693 * FIXME: We emit full barriers on both sides of the operation for
12694 * simplicity. We should have a separate atomic memcpy method instead.
12696 MonoMethod *memcpy_method = get_memcpy_method ();
12698 if (ins_flag & MONO_INST_VOLATILE)
12699 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12701 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12702 call->flags |= ins_flag;
12704 if (ins_flag & MONO_INST_VOLATILE)
12705 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12707 MonoMethod *memset_method = get_memset_method ();
12708 if (ins_flag & MONO_INST_VOLATILE) {
12709 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12710 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12712 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12713 call->flags |= ins_flag;
12724 ins_flag |= MONO_INST_NOTYPECHECK;
12726 ins_flag |= MONO_INST_NORANGECHECK;
12727 /* we ignore the no-nullcheck for now since we
12728 * really do it explicitly only when doing callvirt->call
12732 case CEE_RETHROW: {
12734 int handler_offset = -1;
12736 for (i = 0; i < header->num_clauses; ++i) {
12737 MonoExceptionClause *clause = &header->clauses [i];
12738 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12739 handler_offset = clause->handler_offset;
12744 bblock->flags |= BB_EXCEPTION_UNSAFE;
12746 if (handler_offset == -1)
12749 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12750 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12751 ins->sreg1 = load->dreg;
12752 MONO_ADD_INS (bblock, ins);
12754 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12755 MONO_ADD_INS (bblock, ins);
12758 link_bblock (cfg, bblock, end_bblock);
12759 start_new_bblock = 1;
12767 CHECK_STACK_OVF (1);
12769 token = read32 (ip + 2);
12770 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12771 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12774 val = mono_type_size (type, &ialign);
12776 MonoClass *klass = mini_get_class (method, token, generic_context);
12777 CHECK_TYPELOAD (klass);
12779 val = mono_type_size (&klass->byval_arg, &ialign);
12781 if (mini_is_gsharedvt_klass (cfg, klass))
12782 GSHAREDVT_FAILURE (*ip);
12784 EMIT_NEW_ICONST (cfg, ins, val);
12789 case CEE_REFANYTYPE: {
12790 MonoInst *src_var, *src;
12792 GSHAREDVT_FAILURE (*ip);
12798 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12800 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12801 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12802 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12807 case CEE_READONLY_:
12820 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12830 g_warning ("opcode 0x%02x not handled", *ip);
12834 if (start_new_bblock != 1)
12837 bblock->cil_length = ip - bblock->cil_code;
12838 if (bblock->next_bb) {
12839 /* This could already be set because of inlining, #693905 */
12840 MonoBasicBlock *bb = bblock;
12842 while (bb->next_bb)
12844 bb->next_bb = end_bblock;
12846 bblock->next_bb = end_bblock;
12849 if (cfg->method == method && cfg->domainvar) {
12851 MonoInst *get_domain;
12853 cfg->cbb = init_localsbb;
12855 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12856 MONO_ADD_INS (cfg->cbb, get_domain);
12858 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12860 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12861 MONO_ADD_INS (cfg->cbb, store);
12864 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12865 if (cfg->compile_aot)
12866 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12867 mono_get_got_var (cfg);
12870 if (cfg->method == method && cfg->got_var)
12871 mono_emit_load_got_addr (cfg);
12873 if (init_localsbb) {
12874 cfg->cbb = init_localsbb;
12876 for (i = 0; i < header->num_locals; ++i) {
12877 emit_init_local (cfg, i, header->locals [i], init_locals);
12881 if (cfg->init_ref_vars && cfg->method == method) {
12882 /* Emit initialization for ref vars */
12883 // FIXME: Avoid duplication initialization for IL locals.
12884 for (i = 0; i < cfg->num_varinfo; ++i) {
12885 MonoInst *ins = cfg->varinfo [i];
12887 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12888 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12892 if (cfg->lmf_var && cfg->method == method) {
12893 cfg->cbb = init_localsbb;
12894 emit_push_lmf (cfg);
12897 cfg->cbb = init_localsbb;
12898 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12901 MonoBasicBlock *bb;
12904 * Make seq points at backward branch targets interruptable.
12906 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12907 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12908 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12911 /* Add a sequence point for method entry/exit events */
12912 if (cfg->gen_seq_points_debug_data) {
12913 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12914 MONO_ADD_INS (init_localsbb, ins);
12915 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12916 MONO_ADD_INS (cfg->bb_exit, ins);
12920 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12921 * the code they refer to was dead (#11880).
12923 if (sym_seq_points) {
12924 for (i = 0; i < header->code_size; ++i) {
12925 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12928 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12929 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12936 if (cfg->method == method) {
12937 MonoBasicBlock *bb;
12938 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12939 bb->region = mono_find_block_region (cfg, bb->real_offset);
12941 mono_create_spvar_for_region (cfg, bb->region);
12942 if (cfg->verbose_level > 2)
12943 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12947 if (inline_costs < 0) {
12950 /* Method is too large */
12951 mname = mono_method_full_name (method, TRUE);
12952 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12953 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12957 if ((cfg->verbose_level > 2) && (cfg->method == method))
12958 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12963 g_assert (!mono_error_ok (&cfg->error));
12967 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12971 set_exception_type_from_invalid_il (cfg, method, ip);
12975 g_slist_free (class_inits);
12976 mono_basic_block_free (original_bb);
12977 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12978 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12979 if (cfg->exception_type)
12982 return inline_costs;
12986 store_membase_reg_to_store_membase_imm (int opcode)
12989 case OP_STORE_MEMBASE_REG:
12990 return OP_STORE_MEMBASE_IMM;
12991 case OP_STOREI1_MEMBASE_REG:
12992 return OP_STOREI1_MEMBASE_IMM;
12993 case OP_STOREI2_MEMBASE_REG:
12994 return OP_STOREI2_MEMBASE_IMM;
12995 case OP_STOREI4_MEMBASE_REG:
12996 return OP_STOREI4_MEMBASE_IMM;
12997 case OP_STOREI8_MEMBASE_REG:
12998 return OP_STOREI8_MEMBASE_IMM;
13000 g_assert_not_reached ();
13007 mono_op_to_op_imm (int opcode)
13011 return OP_IADD_IMM;
13013 return OP_ISUB_IMM;
13015 return OP_IDIV_IMM;
13017 return OP_IDIV_UN_IMM;
13019 return OP_IREM_IMM;
13021 return OP_IREM_UN_IMM;
13023 return OP_IMUL_IMM;
13025 return OP_IAND_IMM;
13029 return OP_IXOR_IMM;
13031 return OP_ISHL_IMM;
13033 return OP_ISHR_IMM;
13035 return OP_ISHR_UN_IMM;
13038 return OP_LADD_IMM;
13040 return OP_LSUB_IMM;
13042 return OP_LAND_IMM;
13046 return OP_LXOR_IMM;
13048 return OP_LSHL_IMM;
13050 return OP_LSHR_IMM;
13052 return OP_LSHR_UN_IMM;
13053 #if SIZEOF_REGISTER == 8
13055 return OP_LREM_IMM;
13059 return OP_COMPARE_IMM;
13061 return OP_ICOMPARE_IMM;
13063 return OP_LCOMPARE_IMM;
13065 case OP_STORE_MEMBASE_REG:
13066 return OP_STORE_MEMBASE_IMM;
13067 case OP_STOREI1_MEMBASE_REG:
13068 return OP_STOREI1_MEMBASE_IMM;
13069 case OP_STOREI2_MEMBASE_REG:
13070 return OP_STOREI2_MEMBASE_IMM;
13071 case OP_STOREI4_MEMBASE_REG:
13072 return OP_STOREI4_MEMBASE_IMM;
13074 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13076 return OP_X86_PUSH_IMM;
13077 case OP_X86_COMPARE_MEMBASE_REG:
13078 return OP_X86_COMPARE_MEMBASE_IMM;
13080 #if defined(TARGET_AMD64)
13081 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13082 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13084 case OP_VOIDCALL_REG:
13085 return OP_VOIDCALL;
13093 return OP_LOCALLOC_IMM;
13100 ldind_to_load_membase (int opcode)
13104 return OP_LOADI1_MEMBASE;
13106 return OP_LOADU1_MEMBASE;
13108 return OP_LOADI2_MEMBASE;
13110 return OP_LOADU2_MEMBASE;
13112 return OP_LOADI4_MEMBASE;
13114 return OP_LOADU4_MEMBASE;
13116 return OP_LOAD_MEMBASE;
13117 case CEE_LDIND_REF:
13118 return OP_LOAD_MEMBASE;
13120 return OP_LOADI8_MEMBASE;
13122 return OP_LOADR4_MEMBASE;
13124 return OP_LOADR8_MEMBASE;
13126 g_assert_not_reached ();
13133 stind_to_store_membase (int opcode)
13137 return OP_STOREI1_MEMBASE_REG;
13139 return OP_STOREI2_MEMBASE_REG;
13141 return OP_STOREI4_MEMBASE_REG;
13143 case CEE_STIND_REF:
13144 return OP_STORE_MEMBASE_REG;
13146 return OP_STOREI8_MEMBASE_REG;
13148 return OP_STORER4_MEMBASE_REG;
13150 return OP_STORER8_MEMBASE_REG;
13152 g_assert_not_reached ();
13159 mono_load_membase_to_load_mem (int opcode)
13161 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13162 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13164 case OP_LOAD_MEMBASE:
13165 return OP_LOAD_MEM;
13166 case OP_LOADU1_MEMBASE:
13167 return OP_LOADU1_MEM;
13168 case OP_LOADU2_MEMBASE:
13169 return OP_LOADU2_MEM;
13170 case OP_LOADI4_MEMBASE:
13171 return OP_LOADI4_MEM;
13172 case OP_LOADU4_MEMBASE:
13173 return OP_LOADU4_MEM;
13174 #if SIZEOF_REGISTER == 8
13175 case OP_LOADI8_MEMBASE:
13176 return OP_LOADI8_MEM;
13185 op_to_op_dest_membase (int store_opcode, int opcode)
13187 #if defined(TARGET_X86)
13188 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13193 return OP_X86_ADD_MEMBASE_REG;
13195 return OP_X86_SUB_MEMBASE_REG;
13197 return OP_X86_AND_MEMBASE_REG;
13199 return OP_X86_OR_MEMBASE_REG;
13201 return OP_X86_XOR_MEMBASE_REG;
13204 return OP_X86_ADD_MEMBASE_IMM;
13207 return OP_X86_SUB_MEMBASE_IMM;
13210 return OP_X86_AND_MEMBASE_IMM;
13213 return OP_X86_OR_MEMBASE_IMM;
13216 return OP_X86_XOR_MEMBASE_IMM;
13222 #if defined(TARGET_AMD64)
13223 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13228 return OP_X86_ADD_MEMBASE_REG;
13230 return OP_X86_SUB_MEMBASE_REG;
13232 return OP_X86_AND_MEMBASE_REG;
13234 return OP_X86_OR_MEMBASE_REG;
13236 return OP_X86_XOR_MEMBASE_REG;
13238 return OP_X86_ADD_MEMBASE_IMM;
13240 return OP_X86_SUB_MEMBASE_IMM;
13242 return OP_X86_AND_MEMBASE_IMM;
13244 return OP_X86_OR_MEMBASE_IMM;
13246 return OP_X86_XOR_MEMBASE_IMM;
13248 return OP_AMD64_ADD_MEMBASE_REG;
13250 return OP_AMD64_SUB_MEMBASE_REG;
13252 return OP_AMD64_AND_MEMBASE_REG;
13254 return OP_AMD64_OR_MEMBASE_REG;
13256 return OP_AMD64_XOR_MEMBASE_REG;
13259 return OP_AMD64_ADD_MEMBASE_IMM;
13262 return OP_AMD64_SUB_MEMBASE_IMM;
13265 return OP_AMD64_AND_MEMBASE_IMM;
13268 return OP_AMD64_OR_MEMBASE_IMM;
13271 return OP_AMD64_XOR_MEMBASE_IMM;
13281 op_to_op_store_membase (int store_opcode, int opcode)
13283 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13286 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13287 return OP_X86_SETEQ_MEMBASE;
13289 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13290 return OP_X86_SETNE_MEMBASE;
13298 op_to_op_src1_membase (int load_opcode, int opcode)
13301 /* FIXME: This has sign extension issues */
13303 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13304 return OP_X86_COMPARE_MEMBASE8_IMM;
13307 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13312 return OP_X86_PUSH_MEMBASE;
13313 case OP_COMPARE_IMM:
13314 case OP_ICOMPARE_IMM:
13315 return OP_X86_COMPARE_MEMBASE_IMM;
13318 return OP_X86_COMPARE_MEMBASE_REG;
13322 #ifdef TARGET_AMD64
13323 /* FIXME: This has sign extension issues */
13325 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13326 return OP_X86_COMPARE_MEMBASE8_IMM;
13331 #ifdef __mono_ilp32__
13332 if (load_opcode == OP_LOADI8_MEMBASE)
13334 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13336 return OP_X86_PUSH_MEMBASE;
13338 /* FIXME: This only works for 32 bit immediates
13339 case OP_COMPARE_IMM:
13340 case OP_LCOMPARE_IMM:
13341 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13342 return OP_AMD64_COMPARE_MEMBASE_IMM;
13344 case OP_ICOMPARE_IMM:
13345 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13346 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13350 #ifdef __mono_ilp32__
13351 if (load_opcode == OP_LOAD_MEMBASE)
13352 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13353 if (load_opcode == OP_LOADI8_MEMBASE)
13355 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13357 return OP_AMD64_COMPARE_MEMBASE_REG;
13360 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13361 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13370 op_to_op_src2_membase (int load_opcode, int opcode)
13373 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13379 return OP_X86_COMPARE_REG_MEMBASE;
13381 return OP_X86_ADD_REG_MEMBASE;
13383 return OP_X86_SUB_REG_MEMBASE;
13385 return OP_X86_AND_REG_MEMBASE;
13387 return OP_X86_OR_REG_MEMBASE;
13389 return OP_X86_XOR_REG_MEMBASE;
13393 #ifdef TARGET_AMD64
13394 #ifdef __mono_ilp32__
13395 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13397 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13401 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13403 return OP_X86_ADD_REG_MEMBASE;
13405 return OP_X86_SUB_REG_MEMBASE;
13407 return OP_X86_AND_REG_MEMBASE;
13409 return OP_X86_OR_REG_MEMBASE;
13411 return OP_X86_XOR_REG_MEMBASE;
13413 #ifdef __mono_ilp32__
13414 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13416 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13421 return OP_AMD64_COMPARE_REG_MEMBASE;
13423 return OP_AMD64_ADD_REG_MEMBASE;
13425 return OP_AMD64_SUB_REG_MEMBASE;
13427 return OP_AMD64_AND_REG_MEMBASE;
13429 return OP_AMD64_OR_REG_MEMBASE;
13431 return OP_AMD64_XOR_REG_MEMBASE;
13440 mono_op_to_op_imm_noemul (int opcode)
13443 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13449 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13456 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13461 return mono_op_to_op_imm (opcode);
13466 * mono_handle_global_vregs:
13468 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13472 mono_handle_global_vregs (MonoCompile *cfg)
13474 gint32 *vreg_to_bb;
13475 MonoBasicBlock *bb;
13478 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13480 #ifdef MONO_ARCH_SIMD_INTRINSICS
13481 if (cfg->uses_simd_intrinsics)
13482 mono_simd_simplify_indirection (cfg);
13485 /* Find local vregs used in more than one bb */
13486 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13487 MonoInst *ins = bb->code;
13488 int block_num = bb->block_num;
13490 if (cfg->verbose_level > 2)
13491 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13494 for (; ins; ins = ins->next) {
13495 const char *spec = INS_INFO (ins->opcode);
13496 int regtype = 0, regindex;
13499 if (G_UNLIKELY (cfg->verbose_level > 2))
13500 mono_print_ins (ins);
13502 g_assert (ins->opcode >= MONO_CEE_LAST);
13504 for (regindex = 0; regindex < 4; regindex ++) {
13507 if (regindex == 0) {
13508 regtype = spec [MONO_INST_DEST];
13509 if (regtype == ' ')
13512 } else if (regindex == 1) {
13513 regtype = spec [MONO_INST_SRC1];
13514 if (regtype == ' ')
13517 } else if (regindex == 2) {
13518 regtype = spec [MONO_INST_SRC2];
13519 if (regtype == ' ')
13522 } else if (regindex == 3) {
13523 regtype = spec [MONO_INST_SRC3];
13524 if (regtype == ' ')
13529 #if SIZEOF_REGISTER == 4
13530 /* In the LLVM case, the long opcodes are not decomposed */
13531 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13533 * Since some instructions reference the original long vreg,
13534 * and some reference the two component vregs, it is quite hard
13535 * to determine when it needs to be global. So be conservative.
13537 if (!get_vreg_to_inst (cfg, vreg)) {
13538 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13540 if (cfg->verbose_level > 2)
13541 printf ("LONG VREG R%d made global.\n", vreg);
13545 * Make the component vregs volatile since the optimizations can
13546 * get confused otherwise.
13548 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13549 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13553 g_assert (vreg != -1);
13555 prev_bb = vreg_to_bb [vreg];
13556 if (prev_bb == 0) {
13557 /* 0 is a valid block num */
13558 vreg_to_bb [vreg] = block_num + 1;
13559 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13560 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13563 if (!get_vreg_to_inst (cfg, vreg)) {
13564 if (G_UNLIKELY (cfg->verbose_level > 2))
13565 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13569 if (vreg_is_ref (cfg, vreg))
13570 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13572 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13575 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13578 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13581 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13584 g_assert_not_reached ();
13588 /* Flag as having been used in more than one bb */
13589 vreg_to_bb [vreg] = -1;
13595 /* If a variable is used in only one bblock, convert it into a local vreg */
13596 for (i = 0; i < cfg->num_varinfo; i++) {
13597 MonoInst *var = cfg->varinfo [i];
13598 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13600 switch (var->type) {
13606 #if SIZEOF_REGISTER == 8
13609 #if !defined(TARGET_X86)
13610 /* Enabling this screws up the fp stack on x86 */
13613 if (mono_arch_is_soft_float ())
13616 /* Arguments are implicitly global */
13617 /* Putting R4 vars into registers doesn't work currently */
13618 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13619 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13621 * Make that the variable's liveness interval doesn't contain a call, since
13622 * that would cause the lvreg to be spilled, making the whole optimization
13625 /* This is too slow for JIT compilation */
13627 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13629 int def_index, call_index, ins_index;
13630 gboolean spilled = FALSE;
13635 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13636 const char *spec = INS_INFO (ins->opcode);
13638 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13639 def_index = ins_index;
13641 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13642 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13643 if (call_index > def_index) {
13649 if (MONO_IS_CALL (ins))
13650 call_index = ins_index;
13660 if (G_UNLIKELY (cfg->verbose_level > 2))
13661 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13662 var->flags |= MONO_INST_IS_DEAD;
13663 cfg->vreg_to_inst [var->dreg] = NULL;
13670 * Compress the varinfo and vars tables so the liveness computation is faster and
13671 * takes up less space.
13674 for (i = 0; i < cfg->num_varinfo; ++i) {
13675 MonoInst *var = cfg->varinfo [i];
13676 if (pos < i && cfg->locals_start == i)
13677 cfg->locals_start = pos;
13678 if (!(var->flags & MONO_INST_IS_DEAD)) {
13680 cfg->varinfo [pos] = cfg->varinfo [i];
13681 cfg->varinfo [pos]->inst_c0 = pos;
13682 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13683 cfg->vars [pos].idx = pos;
13684 #if SIZEOF_REGISTER == 4
13685 if (cfg->varinfo [pos]->type == STACK_I8) {
13686 /* Modify the two component vars too */
13689 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13690 var1->inst_c0 = pos;
13691 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13692 var1->inst_c0 = pos;
13699 cfg->num_varinfo = pos;
13700 if (cfg->locals_start > cfg->num_varinfo)
13701 cfg->locals_start = cfg->num_varinfo;
13705 * mono_spill_global_vars:
13707 * Generate spill code for variables which are not allocated to registers,
13708 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13709 * code is generated which could be optimized by the local optimization passes.
13712 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13714 MonoBasicBlock *bb;
13716 int orig_next_vreg;
13717 guint32 *vreg_to_lvreg;
13719 guint32 i, lvregs_len;
13720 gboolean dest_has_lvreg = FALSE;
13721 guint32 stacktypes [128];
13722 MonoInst **live_range_start, **live_range_end;
13723 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13724 int *gsharedvt_vreg_to_idx = NULL;
13726 *need_local_opts = FALSE;
13728 memset (spec2, 0, sizeof (spec2));
13730 /* FIXME: Move this function to mini.c */
13731 stacktypes ['i'] = STACK_PTR;
13732 stacktypes ['l'] = STACK_I8;
13733 stacktypes ['f'] = STACK_R8;
13734 #ifdef MONO_ARCH_SIMD_INTRINSICS
13735 stacktypes ['x'] = STACK_VTYPE;
13738 #if SIZEOF_REGISTER == 4
13739 /* Create MonoInsts for longs */
13740 for (i = 0; i < cfg->num_varinfo; i++) {
13741 MonoInst *ins = cfg->varinfo [i];
13743 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13744 switch (ins->type) {
13749 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13752 g_assert (ins->opcode == OP_REGOFFSET);
13754 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13756 tree->opcode = OP_REGOFFSET;
13757 tree->inst_basereg = ins->inst_basereg;
13758 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13760 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13762 tree->opcode = OP_REGOFFSET;
13763 tree->inst_basereg = ins->inst_basereg;
13764 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13774 if (cfg->compute_gc_maps) {
13775 /* registers need liveness info even for !non refs */
13776 for (i = 0; i < cfg->num_varinfo; i++) {
13777 MonoInst *ins = cfg->varinfo [i];
13779 if (ins->opcode == OP_REGVAR)
13780 ins->flags |= MONO_INST_GC_TRACK;
13784 if (cfg->gsharedvt) {
13785 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13787 for (i = 0; i < cfg->num_varinfo; ++i) {
13788 MonoInst *ins = cfg->varinfo [i];
13791 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13792 if (i >= cfg->locals_start) {
13794 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13795 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13796 ins->opcode = OP_GSHAREDVT_LOCAL;
13797 ins->inst_imm = idx;
13800 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13801 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13807 /* FIXME: widening and truncation */
13810 * As an optimization, when a variable allocated to the stack is first loaded into
13811 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13812 * the variable again.
13814 orig_next_vreg = cfg->next_vreg;
13815 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13816 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13820 * These arrays contain the first and last instructions accessing a given
13822 * Since we emit bblocks in the same order we process them here, and we
13823 * don't split live ranges, these will precisely describe the live range of
13824 * the variable, i.e. the instruction range where a valid value can be found
13825 * in the variables location.
13826 * The live range is computed using the liveness info computed by the liveness pass.
13827 * We can't use vmv->range, since that is an abstract live range, and we need
13828 * one which is instruction precise.
13829 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13831 /* FIXME: Only do this if debugging info is requested */
13832 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13833 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13834 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13835 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13837 /* Add spill loads/stores */
13838 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13841 if (cfg->verbose_level > 2)
13842 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13844 /* Clear vreg_to_lvreg array */
13845 for (i = 0; i < lvregs_len; i++)
13846 vreg_to_lvreg [lvregs [i]] = 0;
13850 MONO_BB_FOR_EACH_INS (bb, ins) {
13851 const char *spec = INS_INFO (ins->opcode);
13852 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13853 gboolean store, no_lvreg;
13854 int sregs [MONO_MAX_SRC_REGS];
13856 if (G_UNLIKELY (cfg->verbose_level > 2))
13857 mono_print_ins (ins);
13859 if (ins->opcode == OP_NOP)
13863 * We handle LDADDR here as well, since it can only be decomposed
13864 * when variable addresses are known.
13866 if (ins->opcode == OP_LDADDR) {
13867 MonoInst *var = ins->inst_p0;
13869 if (var->opcode == OP_VTARG_ADDR) {
13870 /* Happens on SPARC/S390 where vtypes are passed by reference */
13871 MonoInst *vtaddr = var->inst_left;
13872 if (vtaddr->opcode == OP_REGVAR) {
13873 ins->opcode = OP_MOVE;
13874 ins->sreg1 = vtaddr->dreg;
13876 else if (var->inst_left->opcode == OP_REGOFFSET) {
13877 ins->opcode = OP_LOAD_MEMBASE;
13878 ins->inst_basereg = vtaddr->inst_basereg;
13879 ins->inst_offset = vtaddr->inst_offset;
13882 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13883 /* gsharedvt arg passed by ref */
13884 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13886 ins->opcode = OP_LOAD_MEMBASE;
13887 ins->inst_basereg = var->inst_basereg;
13888 ins->inst_offset = var->inst_offset;
13889 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13890 MonoInst *load, *load2, *load3;
13891 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13892 int reg1, reg2, reg3;
13893 MonoInst *info_var = cfg->gsharedvt_info_var;
13894 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13898 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13901 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13903 g_assert (info_var);
13904 g_assert (locals_var);
13906 /* Mark the instruction used to compute the locals var as used */
13907 cfg->gsharedvt_locals_var_ins = NULL;
13909 /* Load the offset */
13910 if (info_var->opcode == OP_REGOFFSET) {
13911 reg1 = alloc_ireg (cfg);
13912 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13913 } else if (info_var->opcode == OP_REGVAR) {
13915 reg1 = info_var->dreg;
13917 g_assert_not_reached ();
13919 reg2 = alloc_ireg (cfg);
13920 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13921 /* Load the locals area address */
13922 reg3 = alloc_ireg (cfg);
13923 if (locals_var->opcode == OP_REGOFFSET) {
13924 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13925 } else if (locals_var->opcode == OP_REGVAR) {
13926 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13928 g_assert_not_reached ();
13930 /* Compute the address */
13931 ins->opcode = OP_PADD;
13935 mono_bblock_insert_before_ins (bb, ins, load3);
13936 mono_bblock_insert_before_ins (bb, load3, load2);
13938 mono_bblock_insert_before_ins (bb, load2, load);
13940 g_assert (var->opcode == OP_REGOFFSET);
13942 ins->opcode = OP_ADD_IMM;
13943 ins->sreg1 = var->inst_basereg;
13944 ins->inst_imm = var->inst_offset;
13947 *need_local_opts = TRUE;
13948 spec = INS_INFO (ins->opcode);
13951 if (ins->opcode < MONO_CEE_LAST) {
13952 mono_print_ins (ins);
13953 g_assert_not_reached ();
13957 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13961 if (MONO_IS_STORE_MEMBASE (ins)) {
13962 tmp_reg = ins->dreg;
13963 ins->dreg = ins->sreg2;
13964 ins->sreg2 = tmp_reg;
13967 spec2 [MONO_INST_DEST] = ' ';
13968 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13969 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13970 spec2 [MONO_INST_SRC3] = ' ';
13972 } else if (MONO_IS_STORE_MEMINDEX (ins))
13973 g_assert_not_reached ();
13978 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13979 printf ("\t %.3s %d", spec, ins->dreg);
13980 num_sregs = mono_inst_get_src_registers (ins, sregs);
13981 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13982 printf (" %d", sregs [srcindex]);
13989 regtype = spec [MONO_INST_DEST];
13990 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13993 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13994 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13995 MonoInst *store_ins;
13997 MonoInst *def_ins = ins;
13998 int dreg = ins->dreg; /* The original vreg */
14000 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14002 if (var->opcode == OP_REGVAR) {
14003 ins->dreg = var->dreg;
14004 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14006 * Instead of emitting a load+store, use a _membase opcode.
14008 g_assert (var->opcode == OP_REGOFFSET);
14009 if (ins->opcode == OP_MOVE) {
14013 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14014 ins->inst_basereg = var->inst_basereg;
14015 ins->inst_offset = var->inst_offset;
14018 spec = INS_INFO (ins->opcode);
14022 g_assert (var->opcode == OP_REGOFFSET);
14024 prev_dreg = ins->dreg;
14026 /* Invalidate any previous lvreg for this vreg */
14027 vreg_to_lvreg [ins->dreg] = 0;
14031 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14033 store_opcode = OP_STOREI8_MEMBASE_REG;
14036 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14038 #if SIZEOF_REGISTER != 8
14039 if (regtype == 'l') {
14040 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14041 mono_bblock_insert_after_ins (bb, ins, store_ins);
14042 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14043 mono_bblock_insert_after_ins (bb, ins, store_ins);
14044 def_ins = store_ins;
14049 g_assert (store_opcode != OP_STOREV_MEMBASE);
14051 /* Try to fuse the store into the instruction itself */
14052 /* FIXME: Add more instructions */
14053 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14054 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14055 ins->inst_imm = ins->inst_c0;
14056 ins->inst_destbasereg = var->inst_basereg;
14057 ins->inst_offset = var->inst_offset;
14058 spec = INS_INFO (ins->opcode);
14059 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14060 ins->opcode = store_opcode;
14061 ins->inst_destbasereg = var->inst_basereg;
14062 ins->inst_offset = var->inst_offset;
14066 tmp_reg = ins->dreg;
14067 ins->dreg = ins->sreg2;
14068 ins->sreg2 = tmp_reg;
14071 spec2 [MONO_INST_DEST] = ' ';
14072 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14073 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14074 spec2 [MONO_INST_SRC3] = ' ';
14076 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14077 // FIXME: The backends expect the base reg to be in inst_basereg
14078 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14080 ins->inst_basereg = var->inst_basereg;
14081 ins->inst_offset = var->inst_offset;
14082 spec = INS_INFO (ins->opcode);
14084 /* printf ("INS: "); mono_print_ins (ins); */
14085 /* Create a store instruction */
14086 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14088 /* Insert it after the instruction */
14089 mono_bblock_insert_after_ins (bb, ins, store_ins);
14091 def_ins = store_ins;
14094 * We can't assign ins->dreg to var->dreg here, since the
14095 * sregs could use it. So set a flag, and do it after
14098 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14099 dest_has_lvreg = TRUE;
14104 if (def_ins && !live_range_start [dreg]) {
14105 live_range_start [dreg] = def_ins;
14106 live_range_start_bb [dreg] = bb;
14109 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14112 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14113 tmp->inst_c1 = dreg;
14114 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14121 num_sregs = mono_inst_get_src_registers (ins, sregs);
14122 for (srcindex = 0; srcindex < 3; ++srcindex) {
14123 regtype = spec [MONO_INST_SRC1 + srcindex];
14124 sreg = sregs [srcindex];
14126 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14127 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14128 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14129 MonoInst *use_ins = ins;
14130 MonoInst *load_ins;
14131 guint32 load_opcode;
14133 if (var->opcode == OP_REGVAR) {
14134 sregs [srcindex] = var->dreg;
14135 //mono_inst_set_src_registers (ins, sregs);
14136 live_range_end [sreg] = use_ins;
14137 live_range_end_bb [sreg] = bb;
14139 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14142 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14143 /* var->dreg is a hreg */
14144 tmp->inst_c1 = sreg;
14145 mono_bblock_insert_after_ins (bb, ins, tmp);
14151 g_assert (var->opcode == OP_REGOFFSET);
14153 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14155 g_assert (load_opcode != OP_LOADV_MEMBASE);
14157 if (vreg_to_lvreg [sreg]) {
14158 g_assert (vreg_to_lvreg [sreg] != -1);
14160 /* The variable is already loaded to an lvreg */
14161 if (G_UNLIKELY (cfg->verbose_level > 2))
14162 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14163 sregs [srcindex] = vreg_to_lvreg [sreg];
14164 //mono_inst_set_src_registers (ins, sregs);
14168 /* Try to fuse the load into the instruction */
14169 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14170 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14171 sregs [0] = var->inst_basereg;
14172 //mono_inst_set_src_registers (ins, sregs);
14173 ins->inst_offset = var->inst_offset;
14174 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14175 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14176 sregs [1] = var->inst_basereg;
14177 //mono_inst_set_src_registers (ins, sregs);
14178 ins->inst_offset = var->inst_offset;
14180 if (MONO_IS_REAL_MOVE (ins)) {
14181 ins->opcode = OP_NOP;
14184 //printf ("%d ", srcindex); mono_print_ins (ins);
14186 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14188 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14189 if (var->dreg == prev_dreg) {
14191 * sreg refers to the value loaded by the load
14192 * emitted below, but we need to use ins->dreg
14193 * since it refers to the store emitted earlier.
14197 g_assert (sreg != -1);
14198 vreg_to_lvreg [var->dreg] = sreg;
14199 g_assert (lvregs_len < 1024);
14200 lvregs [lvregs_len ++] = var->dreg;
14204 sregs [srcindex] = sreg;
14205 //mono_inst_set_src_registers (ins, sregs);
14207 #if SIZEOF_REGISTER != 8
14208 if (regtype == 'l') {
14209 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14210 mono_bblock_insert_before_ins (bb, ins, load_ins);
14211 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14212 mono_bblock_insert_before_ins (bb, ins, load_ins);
14213 use_ins = load_ins;
14218 #if SIZEOF_REGISTER == 4
14219 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14221 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14222 mono_bblock_insert_before_ins (bb, ins, load_ins);
14223 use_ins = load_ins;
14227 if (var->dreg < orig_next_vreg) {
14228 live_range_end [var->dreg] = use_ins;
14229 live_range_end_bb [var->dreg] = bb;
14232 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14235 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14236 tmp->inst_c1 = var->dreg;
14237 mono_bblock_insert_after_ins (bb, ins, tmp);
14241 mono_inst_set_src_registers (ins, sregs);
14243 if (dest_has_lvreg) {
14244 g_assert (ins->dreg != -1);
14245 vreg_to_lvreg [prev_dreg] = ins->dreg;
14246 g_assert (lvregs_len < 1024);
14247 lvregs [lvregs_len ++] = prev_dreg;
14248 dest_has_lvreg = FALSE;
14252 tmp_reg = ins->dreg;
14253 ins->dreg = ins->sreg2;
14254 ins->sreg2 = tmp_reg;
14257 if (MONO_IS_CALL (ins)) {
14258 /* Clear vreg_to_lvreg array */
14259 for (i = 0; i < lvregs_len; i++)
14260 vreg_to_lvreg [lvregs [i]] = 0;
14262 } else if (ins->opcode == OP_NOP) {
14264 MONO_INST_NULLIFY_SREGS (ins);
14267 if (cfg->verbose_level > 2)
14268 mono_print_ins_index (1, ins);
14271 /* Extend the live range based on the liveness info */
14272 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14273 for (i = 0; i < cfg->num_varinfo; i ++) {
14274 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14276 if (vreg_is_volatile (cfg, vi->vreg))
14277 /* The liveness info is incomplete */
14280 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14281 /* Live from at least the first ins of this bb */
14282 live_range_start [vi->vreg] = bb->code;
14283 live_range_start_bb [vi->vreg] = bb;
14286 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14287 /* Live at least until the last ins of this bb */
14288 live_range_end [vi->vreg] = bb->last_ins;
14289 live_range_end_bb [vi->vreg] = bb;
14295 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14297 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14298 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14300 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14301 for (i = 0; i < cfg->num_varinfo; ++i) {
14302 int vreg = MONO_VARINFO (cfg, i)->vreg;
14305 if (live_range_start [vreg]) {
14306 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14308 ins->inst_c1 = vreg;
14309 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14311 if (live_range_end [vreg]) {
14312 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14314 ins->inst_c1 = vreg;
14315 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14316 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14318 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14324 if (cfg->gsharedvt_locals_var_ins) {
14325 /* Nullify if unused */
14326 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14327 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14330 g_free (live_range_start);
14331 g_free (live_range_end);
14332 g_free (live_range_start_bb);
14333 g_free (live_range_end_bb);
14338 * - use 'iadd' instead of 'int_add'
14339 * - handling ovf opcodes: decompose in method_to_ir.
14340 * - unify iregs/fregs
14341 * -> partly done, the missing parts are:
14342 * - a more complete unification would involve unifying the hregs as well, so
14343 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14344 * would no longer map to the machine hregs, so the code generators would need to
14345 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14346 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14347 * fp/non-fp branches speeds it up by about 15%.
14348 * - use sext/zext opcodes instead of shifts
14350 * - get rid of TEMPLOADs if possible and use vregs instead
14351 * - clean up usage of OP_P/OP_ opcodes
14352 * - cleanup usage of DUMMY_USE
14353 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14355 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14356 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14357 * - make sure handle_stack_args () is called before the branch is emitted
14358 * - when the new IR is done, get rid of all unused stuff
14359 * - COMPARE/BEQ as separate instructions or unify them ?
14360 * - keeping them separate allows specialized compare instructions like
14361 * compare_imm, compare_membase
14362 * - most back ends unify fp compare+branch, fp compare+ceq
14363 * - integrate mono_save_args into inline_method
14364 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14365 * - handle long shift opts on 32 bit platforms somehow: they require
14366 * 3 sregs (2 for arg1 and 1 for arg2)
14367 * - make byref a 'normal' type.
14368 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14369 * variable if needed.
14370 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14371 * like inline_method.
14372 * - remove inlining restrictions
14373 * - fix LNEG and enable cfold of INEG
14374 * - generalize x86 optimizations like ldelema as a peephole optimization
14375 * - add store_mem_imm for amd64
14376 * - optimize the loading of the interruption flag in the managed->native wrappers
14377 * - avoid special handling of OP_NOP in passes
14378 * - move code inserting instructions into one function/macro.
14379 * - try a coalescing phase after liveness analysis
14380 * - add float -> vreg conversion + local optimizations on !x86
14381 * - figure out how to handle decomposed branches during optimizations, ie.
14382 * compare+branch, op_jump_table+op_br etc.
14383 * - promote RuntimeXHandles to vregs
14384 * - vtype cleanups:
14385 * - add a NEW_VARLOADA_VREG macro
14386 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14387 * accessing vtype fields.
14388 * - get rid of I8CONST on 64 bit platforms
14389 * - dealing with the increase in code size due to branches created during opcode
14391 * - use extended basic blocks
14392 * - all parts of the JIT
14393 * - handle_global_vregs () && local regalloc
14394 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14395 * - sources of increase in code size:
14398 * - isinst and castclass
14399 * - lvregs not allocated to global registers even if used multiple times
14400 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14402 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14403 * - add all micro optimizations from the old JIT
14404 * - put tree optimizations into the deadce pass
14405 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14406 * specific function.
14407 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14408 * fcompare + branchCC.
14409 * - create a helper function for allocating a stack slot, taking into account
14410 * MONO_CFG_HAS_SPILLUP.
14412 * - merge the ia64 switch changes.
14413 * - optimize mono_regstate2_alloc_int/float.
14414 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14415 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14416 * parts of the tree could be separated by other instructions, killing the tree
14417 * arguments, or stores killing loads etc. Also, should we fold loads into other
14418 * instructions if the result of the load is used multiple times ?
14419 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14420 * - LAST MERGE: 108395.
14421 * - when returning vtypes in registers, generate IR and append it to the end of the
14422 * last bb instead of doing it in the epilog.
14423 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14431 - When to decompose opcodes:
14432 - earlier: this makes some optimizations hard to implement, since the low level IR
14433 no longer contains the necessary information. But it is easier to do.
14434 - later: harder to implement, enables more optimizations.
14435 - Branches inside bblocks:
14436 - created when decomposing complex opcodes.
14437 - branches to another bblock: harmless, but not tracked by the branch
14438 optimizations, so need to branch to a label at the start of the bblock.
14439 - branches to inside the same bblock: very problematic, trips up the local
14440 reg allocator. Can be fixed by splitting the current bblock, but that is a
14441 complex operation, since some local vregs can become global vregs etc.
14442 - Local/global vregs:
14443 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14444 local register allocator.
14445 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14446 structure, created by mono_create_var (). Assigned to hregs or the stack by
14447 the global register allocator.
14448 - When to do optimizations like alu->alu_imm:
14449 - earlier -> saves work later on since the IR will be smaller/simpler
14450 - later -> can work on more instructions
14451 - Handling of valuetypes:
14452 - When a vtype is pushed on the stack, a new temporary is created, an
14453 instruction computing its address (LDADDR) is emitted and pushed on
14454 the stack. Need to optimize cases when the vtype is used immediately as in
14455 argument passing, stloc etc.
14456 - Instead of the to_end stuff in the old JIT, simply call the function handling
14457 the values on the stack before emitting the last instruction of the bb.
14460 #endif /* DISABLE_JIT */