2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/*
 * Compilation-failure macros.  All of these assume an in-scope `cfg`
 * (MonoCompile *) and an `exception_exit` / `mono_error_exit` label in the
 * enclosing function: they record the failure reason on the cfg and bail out.
 * NOTE(review): the closing `} while (0)` lines of these macros are not
 * visible in this extraction; the bodies below are the surviving fragments.
 */
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations and lazily-created helper-call signatures
 * (filled in by mono_create_helper_signatures () below). */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
/* Per-opcode dest/sreg metadata, generated by expanding mini-ops.h twice
 * with different MINI_OP/MINI_OP3 definitions. */
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Virtual-register allocation helpers: thin public wrappers around the
 * static alloc_* helpers, so other files can allocate vregs on a cfg.
 */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
/* Allocate a long (64-bit) vreg. */
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
/* Allocate a floating-point vreg. */
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Map TYPE to the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/OP_RMOVE/...)
 * used when copying a value of that type between vregs.  Enums and
 * generic instances are first reduced to their underlying type.
 */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_get_underlying_type (cfg, type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values are kept in single precision, so use RMOVE, not FMOVE */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of a basic block: its in/out edges and every instruction
 * in its code list, prefixed with MSG.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   Build the icall signatures used by the trampoline helpers declared
 * above.  The string describes "<ret> <args...>" for the icall.
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Debugger hook: a place to set a native breakpoint when the IL fails
 * to verify (enabled via the 'break_on_unverified' debug option). */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *   Record a MethodAccessException on the cfg; the message strings are
 * owned by the cfg (the two full names are freed here).
 */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/* Same as method_access_failure (), but for an inaccessible field. */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/* Mark the compilation as a failed inline attempt (see INLINE_FAILURE). */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *   Record that the gsharedvt (generic sharing for valuetypes) compilation
 * of the current method cannot handle OPCODE.  FILE/LINE are the
 * GSHAREDVT_FAILURE call site; the message is stored on the cfg and the
 * compilation falls back to a concrete instantiation.
 */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
/* Look up (or create and register) the basic block starting at IL offset IP. */
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA (base + index<<shift + imm) into a fresh
 * managed-pointer vreg. */
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
/* On 64-bit, sign-extend the I4 operand to pointer width. */
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
/*
 * Eval-stack opcode helpers used by the big IL-decoding loop.  They assume
 * the loop locals `ins`, `sp`, `bblock`, `stack_start`, `ip`, `target`,
 * `tblock`, `cmp` and `start_new_bblock` are in scope.  ADD_BINOP pops two
 * values, ADD_UNOP pops one; both type-check via type_from_op () and push
 * the (possibly decomposed) result.  ADD_BINCOND emits a compare + branch
 * pair and wires up the true/false CFG edges.
 */
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, bblock, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, bblock, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, bblock, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (bblock, cmp); \
559 MONO_ADD_INS (bblock, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
/* NOTE(review): the verbose traces below mix "IL%04x" and "IL_%04x" for
 * the same kind of offset — the trace format is inconsistent. */
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* Skip the insert if the edge already exists in from's out list. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
/* Grow the out_bb array by one; edge arrays live in the cfg mempool,
 * so the old array is simply abandoned, not freed. */
597 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Mirror the same dedup + grow logic for to's in list. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
/* Token layout: (clause index + 1) << 8 | region kind | clause flags. */
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Not in any handler: being inside the try range alone decides the region. */
666 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
667 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (as a GList) the exception clauses of kind TYPE whose try range
 * covers IP but not TARGET — i.e. the handlers a branch from IP to TARGET
 * would leave, such as finally blocks that must run.
 */
674 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
676 MonoMethodHeader *header = cfg->header;
677 MonoExceptionClause *clause;
681 for (i = 0; i < header->num_clauses; ++i) {
682 clause = &header->clauses [i];
683 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
684 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
685 if (clause->flags == type)
686 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *   Get (creating on first use) the stack-pointer spill variable for an
 * exception-handling region; cached per region in cfg->spvars.
 */
693 mono_create_spvar_for_region (MonoCompile *cfg, int region)
697 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
701 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
702 /* prevent it from being register allocated */
703 var->flags |= MONO_INST_VOLATILE;
705 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at OFFSET, if any. */
709 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
711 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Like mono_find_exvar_for_offset (), but creates the variable on first use. */
715 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
719 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
723 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
724 /* prevent it from being register allocated */
725 var->flags |= MONO_INST_VOLATILE;
727 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
733 * Returns the type used in the eval stack when @type is loaded.
734 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from TYPE. */
737 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
741 type = mini_get_underlying_type (cfg, type);
742 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
744 inst->type = STACK_MP;
749 switch (type->type) {
751 inst->type = STACK_INV;
755 case MONO_TYPE_BOOLEAN:
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
/* R4 stack type depends on whether the backend keeps r4 in single precision. */
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's underlying type. */
800 type = &type->data.generic_class->container_class->byval_arg;
804 g_assert (cfg->generic_sharing_context);
/* Type variables: vtype when compiled gsharedvt, otherwise treated as object. */
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 inst->type = STACK_OBJ;
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Indexed [src1->type][src2->type]; entries give the result STACK_* type,
 * STACK_INV marking an invalid operand pairing.
 * NOTE(review): rows mix 8 and 9 initializers — the 9th (STACK_R4) column
 * appears only where relevant; the remaining entries rely on implicit
 * zero-initialization.  Confirm STACK_INV == 0 against the enum in mini.h. */
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of the 'neg' opcode per operand stack type. */
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
/* Integer-only binops (and/or/xor, rem, ...): no float rows. */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero = allowed (the exact
 * non-zero codes distinguish equality-only and unverifiable pairings). */
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
/* Shift ops: result has the type of the value, shift amount must be I4/native int. */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CEE_/OP_ opcode,
 * indexed by the operand's STACK_* type. */
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
/* Central IL type-checker: uses the tables above to both validate operand
 * types and specialize the generic opcode (ins->opcode += *_op_map [...]). */
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I variant from the first operand's width. */
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only comparability code 1 (plain) is verifiable. */
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: convert from an unsigned interpretation of the source. */
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native unsigned int; opcode depends on source width. */
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
/* Overflow-checked arithmetic is not defined for floats. */
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
/* Map from MonoTypeEnum-ish indices to eval-stack types (table fragment). */
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
/* Verify that the values in ARGS are compatible with SIG's parameter types
 * (used when type-checking a call site). */
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on arches that define
 * MONO_ARCH_NEED_GOT_VAR, and only when compiling AOT. */
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; requires a
 * generic-sharing context on the cfg. */
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: a MonoType* for an eval-stack entry. */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Inverse of type_from_stack_type: map a MonoType* to its STACK_* kind
 *   after resolving enums/underlying types.  R4 maps to cfg->r4_stack_type
 *   (the per-config choice for float32 on the stack).
 */
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1289 t = mono_type_get_underlying_type (t);
1293 case MONO_TYPE_BOOLEAN:
1296 case MONO_TYPE_CHAR:
1303 case MONO_TYPE_FNPTR:
1305 case MONO_TYPE_CLASS:
1306 case MONO_TYPE_STRING:
1307 case MONO_TYPE_OBJECT:
1308 case MONO_TYPE_SZARRAY:
1309 case MONO_TYPE_ARRAY:
1315 return cfg->r4_stack_type;
1318 case MONO_TYPE_VALUETYPE:
1319 case MONO_TYPE_TYPEDBYREF:
1321 case MONO_TYPE_GENERICINST:
1322 if (mono_type_generic_inst_is_valuetype (t))
1328 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element MonoClass* implied by a CIL ldelem/stelem opcode
 *   (e.g. CEE_LDELEM_REF -> object_class).  Aborts on unknown opcodes.
 */
1335 array_access_to_klass (int opcode)
1339 return mono_defaults.byte_class;
1341 return mono_defaults.uint16_class;
1344 return mono_defaults.int_class;
1347 return mono_defaults.sbyte_class;
1350 return mono_defaults.int16_class;
1353 return mono_defaults.int32_class;
1355 return mono_defaults.uint32_class;
1358 return mono_defaults.int64_class;
1361 return mono_defaults.single_class;
1364 return mono_defaults.double_class;
1365 case CEE_LDELEM_REF:
1366 case CEE_STELEM_REF:
1367 return mono_defaults.object_class;
1369 g_assert_not_reached ();
1375 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable for stack slot 'slot' holding a value of
 *   ins's stack type, reusing a previously allocated temp (cfg->intvars)
 *   for the same (slot, type) pair when one exists.  Slots beyond
 *   header->max_stack (possible with inlining) always get a fresh var.
 */
1378 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1383 /* inlining can result in deeper stacks */
1384 if (slot >= cfg->header->max_stack)
1385 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1387 pos = ins->type - 1 + slot * STACK_MAX;
1389 switch (ins->type) {
1396 if ((vnum = cfg->intvars [pos]))
1397 return cfg->varinfo [vnum];
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1399 cfg->intvars [pos] = res->inst_c0;
1402 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Under AOT, record the (image, token) pair for 'key' in
 *   cfg->token_info_hash so the AOT compiler can later resolve the item.
 *   Skipped when a generic context is active (token alone would be
 *   ambiguous) or when the token has no table (wrapper references).
 */
1408 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1411 * Don't use this if a generic_context is set, since that means AOT can't
1412 * look up the method using just the image+token.
1413 * table == 0 means this is a reference made from a wrapper.
1415 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1416 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1417 jump_info_token->image = image;
1418 jump_info_token->token = token;
1419 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1424 * This function is called to handle items that are left on the evaluation stack
1425 * at basic block boundaries. What happens is that we save the values to local variables
1426 * and we reload them later when first entering the target basic block (with the
1427 * handle_loaded_temps () function).
1428 * A single joint point will use the same variables (stored in the array bb->out_stack or
1429 * bb->in_stack, if the basic block is before or after the joint point).
1431 * This function needs to be called _before_ emitting the last instruction of
1432 * the bb (i.e. before emitting a branch).
1433 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *   Spill the 'count' values in sp[] left on the evaluation stack at a
 *   basic-block boundary into shared local variables, so the successor
 *   blocks can reload them (see the big comment above this function).
 *   Sets cfg->unverifiable if successor blocks disagree on stack depth.
 */
1436 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1439 MonoBasicBlock *bb = cfg->cbb;
1440 MonoBasicBlock *outb;
1441 MonoInst *inst, **locals;
1446 if (cfg->verbose_level > 3)
1447 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time we see this block's exit stack: pick the out_stack vars,
 * preferring the in_stack already chosen by a successor. */
1448 if (!bb->out_scount) {
1449 bb->out_scount = count;
1450 //printf ("bblock %d has out:", bb->block_num);
1452 for (i = 0; i < bb->out_count; ++i) {
1453 outb = bb->out_bb [i];
1454 /* exception handlers are linked, but they should not be considered for stack args */
1455 if (outb->flags & BB_EXCEPTION_HANDLER)
1457 //printf (" %d", outb->block_num);
1458 if (outb->in_stack) {
1460 bb->out_stack = outb->in_stack;
1466 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1467 for (i = 0; i < count; ++i) {
1469 * try to reuse temps already allocated for this purpose, if they occupy the same
1470 * stack slot and if they are of the same type.
1471 * This won't cause conflicts since if 'local' is used to
1472 * store one of the values in the in_stack of a bblock, then
1473 * the same variable will be used for the same outgoing stack
1475 * This doesn't work when inlining methods, since the bblocks
1476 * in the inlined methods do not inherit their in_stack from
1477 * the bblock they are inlined to. See bug #58863 for an
1480 if (cfg->inlined_method)
1481 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1483 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of each successor; a depth
 * mismatch with an existing in_scount makes the method unverifiable. */
1488 for (i = 0; i < bb->out_count; ++i) {
1489 outb = bb->out_bb [i];
1490 /* exception handlers are linked, but they should not be considered for stack args */
1491 if (outb->flags & BB_EXCEPTION_HANDLER)
1493 if (outb->in_scount) {
1494 if (outb->in_scount != bb->out_scount) {
1495 cfg->unverifiable = TRUE;
1498 continue; /* check they are the same locals */
1500 outb->in_scount = count;
1501 outb->in_stack = bb->out_stack;
1504 locals = bb->out_stack;
/* Emit the actual spills into the chosen locals. */
1506 for (i = 0; i < count; ++i) {
1507 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1508 inst->cil_code = sp [i]->cil_code;
1509 sp [i] = locals [i];
1510 if (cfg->verbose_level > 3)
1511 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1515 * It is possible that the out bblocks already have in_stack assigned, and
1516 * the in_stacks differ. In this case, we will store to all the different
1523 /* Find a bblock which has a different in_stack */
1525 while (bindex < bb->out_count) {
1526 outb = bb->out_bb [bindex];
1527 /* exception handlers are linked, but they should not be considered for stack args */
1528 if (outb->flags & BB_EXCEPTION_HANDLER) {
1532 if (outb->in_stack != locals) {
1533 for (i = 0; i < count; ++i) {
1534 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1535 inst->cil_code = sp [i]->cil_code;
1536 sp [i] = locals [i];
1537 if (cfg->verbose_level > 3)
1538 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1540 locals = outb->in_stack;
1549 /* Emit code which loads interface_offsets [klass->interface_id]
1550 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load into intf_reg the interface-offset entry for 'klass' from the
 *   array stored immediately before the vtable pointed to by vtable_reg.
 *   Under AOT the interface id is materialized as an ADJUSTED_IID patch
 *   constant instead of being baked in as an immediate offset.
 */
1553 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1555 if (cfg->compile_aot) {
1556 int ioffset_reg = alloc_preg (cfg);
1557 int iid_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that tests the bit for klass->interface_id in the interface
 *   bitmap located at base_reg + offset, leaving a nonzero value in
 *   intf_bit_reg iff the bit is set.  With COMPRESSED_INTERFACE_BITMAP
 *   the test is done via the mono_class_interface_match icall; otherwise
 *   the byte is loaded and masked inline (computed at runtime under AOT,
 *   with immediates otherwise).
 */
1569 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1571 int ibitmap_reg = alloc_preg (cfg);
1572 #ifdef COMPRESSED_INTERFACE_BITMAP
1574 MonoInst *res, *ins;
1575 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1576 MONO_ADD_INS (cfg->cbb, ins);
1578 if (cfg->compile_aot)
1579 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1581 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1582 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1585 int ibitmap_byte_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1589 if (cfg->compile_aot) {
/* iid is only known at runtime: compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted IR. */
1590 int iid_reg = alloc_preg (cfg);
1591 int shifted_iid_reg = alloc_preg (cfg);
1592 int ibitmap_byte_address_reg = alloc_preg (cfg);
1593 int masked_iid_reg = alloc_preg (cfg);
1594 int iid_one_bit_reg = alloc_preg (cfg);
1595 int iid_bit_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1601 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1602 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: iid is a compile-time constant, fold index and mask. */
1605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1612 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1613 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass.interface_bitmap. */
1616 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1618 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1622 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1623 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable.interface_bitmap. */
1626 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1628 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1632 * Emit code which checks whether the interface id of @klass is smaller
1633 * than the value given by max_iid_reg.
/*
 * On failure it either branches to false_target (when given) or throws
 * InvalidCastException.  Under AOT the iid comes from a patch constant.
 */
1636 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1637 MonoBasicBlock *false_target)
1639 if (cfg->compile_aot) {
1640 int iid_reg = alloc_preg (cfg);
1641 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1649 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1652 /* Same as above, but obtains max_iid from a vtable */
1654 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1655 MonoBasicBlock *false_target)
1657 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the LOADU2. */
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1660 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1663 /* Same as above, but obtains max_iid from a klass */
1665 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1666 MonoBasicBlock *false_target)
1668 int max_iid_reg = alloc_preg (cfg);
/* Same 16-bit load, but from MonoClass instead of MonoVTable. */
1670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1671 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subclass test: load the supertypes array of the
 *   class in klass_reg, compare the entry at klass->idepth - 1 against
 *   'klass' (or klass_ins / an AOT class constant), branching to
 *   true_target on match and to false_target when the idepth is too small.
 */
1675 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1677 int idepth_reg = alloc_preg (cfg);
1678 int stypes_reg = alloc_preg (cfg);
1679 int stype = alloc_preg (cfg);
1681 mono_class_setup_supertypes (klass);
/* Only classes deeper than the default supertable need an explicit
 * idepth bounds check before indexing the supertypes array. */
1683 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1692 } else if (cfg->compile_aot) {
1693 int const_reg = alloc_preg (cfg);
1694 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1695 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction. */
1703 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1705 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check from a vtable register: first the
 *   max-iid range check, then the interface bitmap test.  Branches to
 *   true_target on success, otherwise throws InvalidCastException.
 */
1709 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1711 int intf_reg = alloc_preg (cfg);
1713 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1714 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1723 * Variant of the above that takes a register to the class, not the vtable.
1726 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1728 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast but using the MonoClass-based helpers. */
1730 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1731 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1736 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of klass_reg against 'klass'
 *   (via klass_inst when given, an AOT class constant under AOT, or an
 *   immediate otherwise), throwing InvalidCastException on mismatch.
 */
1740 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1743 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1744 } else if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a klass instruction. */
1755 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1757 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against 'klass' (AOT constant or immediate) and
 *   branch to 'target' with the caller-supplied branch opcode instead of
 *   throwing.
 */
1761 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1763 if (cfg->compile_aot) {
1764 int const_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1774 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check for the class in klass_reg against 'klass',
 *   throwing InvalidCastException on failure.  Array classes get rank and
 *   element-class checks (with special handling for enum/object element
 *   types and a bounds check to distinguish vectors from mdim arrays);
 *   non-array classes use the supertypes-array walk.
 */
1777 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1780 int rank_reg = alloc_preg (cfg);
1781 int eclass_reg = alloc_preg (cfg);
1783 g_assert (!klass_inst);
/* Rank must match exactly before the element classes are compared. */
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1787 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1789 if (klass->cast_class == mono_defaults.object_class) {
1790 int parent_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1792 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1795 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 } else if (klass->cast_class == mono_defaults.enum_class) {
1798 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1799 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1800 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1802 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1803 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY casts require the object to be a vector (no bounds array). */
1806 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1807 /* Check that the object is a vector too */
1808 int bounds_reg = alloc_preg (cfg);
1809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1811 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertypes-array walk, same shape as the isinst
 * helper but throwing instead of branching on failure. */
1814 int idepth_reg = alloc_preg (cfg);
1815 int stypes_reg = alloc_preg (cfg);
1816 int stype = alloc_preg (cfg);
1818 mono_class_setup_supertypes (klass);
1820 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1823 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1827 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a klass instruction. */
1832 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1834 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that zero-fills 'size' bytes at destreg + offset.
 *   Only val == 0 is supported (asserted).  Small aligned sizes become a
 *   single store-immediate; otherwise a value register is filled and
 *   stores of decreasing width are emitted, falling back to byte stores
 *   on platforms without unaligned access.
 */
1838 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1842 g_assert (val == 0);
1847 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1856 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1858 #if SIZEOF_REGISTER == 8
1860 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1866 val_reg = alloc_preg (cfg);
1868 if (SIZEOF_REGISTER == 8)
1869 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1871 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1874 /* This could be optimized further if necessary */
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1883 #if !NO_UNALIGNED_ACCESS
1884 if (SIZEOF_REGISTER == 8) {
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying 'size' bytes from srcreg + soffset to
 *   destreg + doffset, using the widest load/store pairs the alignment
 *   and platform allow, then narrowing down to 4/2/1-byte copies for the
 *   remainder.  Size is capped by assertion to avoid unbounded expansion.
 */
1916 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1923 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1924 g_assert (size < 10000);
1927 /* This could be optimized further if necessary */
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1938 #if !NO_UNALIGNED_ACCESS
1939 if (SIZEOF_REGISTER == 8) {
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1952 cur_reg = alloc_preg (cfg);
1953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1960 cur_reg = alloc_preg (cfg);
1961 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1968 cur_reg = alloc_preg (cfg);
1969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing the value in sreg1 into the TLS slot 'tls_key'.
 *   Under AOT the slot offset comes from a TLS offset constant and
 *   OP_TLS_SET_REG; otherwise the offset is resolved at compile time via
 *   mini_get_tls_offset () and OP_TLS_SET is used.
 */
1978 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1982 if (cfg->compile_aot) {
1983 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1984 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1986 ins->sreg2 = c->dreg;
1987 MONO_ADD_INS (cfg->cbb, ins);
1989 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1991 ins->inst_offset = mini_get_tls_offset (tls_key);
1992 MONO_ADD_INS (cfg->cbb, ins);
1999 * Emit IR to push the current LMF onto the LMF stack.
2002 emit_push_lmf (MonoCompile *cfg)
2005 * Emit IR to push the LMF:
2006 * lmf_addr = <lmf_addr from tls>
2007 * lmf->lmf_addr = lmf_addr
2008 * lmf->prev_lmf = *lmf_addr
2011 int lmf_reg, prev_lmf_reg;
2012 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF lives directly in TLS (TLS_KEY_LMF). */
2017 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2018 /* Load current lmf */
2019 lmf_ins = mono_get_lmf_intrinsic (cfg);
2021 MONO_ADD_INS (cfg->cbb, lmf_ins);
2022 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2023 lmf_reg = ins->dreg;
2024 /* Save previous_lmf */
2025 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2027 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2030 * Store lmf_addr in a variable, so it can be allocated to a global register.
2032 if (!cfg->lmf_addr_var)
2033 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr via the jit_tls intrinsic when available... */
2036 ins = mono_get_jit_tls_intrinsic (cfg);
2038 int jit_tls_dreg = ins->dreg;
2040 MONO_ADD_INS (cfg->cbb, ins);
2041 lmf_reg = alloc_preg (cfg);
2042 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...or fall back to the mono_get_lmf_addr icall. */
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2049 MONO_ADD_INS (cfg->cbb, lmf_ins);
2052 MonoInst *args [16], *jit_tls_ins, *ins;
2054 /* Inline mono_get_lmf_addr () */
2055 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2057 /* Load mono_jit_tls_id */
2058 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2059 /* call pthread_getspecific () */
2060 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2061 /* lmf_addr = &jit_tls->lmf */
2062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2065 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2069 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2071 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2072 lmf_reg = ins->dreg;
2074 prev_lmf_reg = alloc_preg (cfg);
2075 /* Save previous_lmf */
2076 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make our LMF the new top of the LMF stack: *lmf_addr = lmf. */
2079 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2086 * Emit IR to pop the current LMF from the LMF stack.
2089 emit_pop_lmf (MonoCompile *cfg)
2091 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2097 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2098 lmf_reg = ins->dreg;
/* TLS fast path: restore previous_lmf straight into the TLS slot. */
2100 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2101 /* Load previous_lmf */
2102 prev_lmf_reg = alloc_preg (cfg);
2103 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2105 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2108 * Emit IR to pop the LMF:
2109 * *(lmf->lmf_addr) = lmf->prev_lmf
2111 /* This could be called before emit_push_lmf () */
2112 if (!cfg->lmf_addr_var)
2113 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2114 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2116 prev_lmf_reg = alloc_preg (cfg);
2117 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2118 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave icall to 'func' with the current method
 *   as argument.  Skipped for inlined methods (would skew profiles) and
 *   when enter/leave profiling is not enabled.
 */
2123 emit_instrumentation_call (MonoCompile *cfg, void *func)
2125 MonoInst *iargs [1];
2128 * Avoid instrumenting inlined methods since it can
2129 * distort profiling results.
2131 if (cfg->method != cfg->current_method)
2134 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2135 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2136 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the call opcode for a return type: each type class maps to a
 *   family (VOIDCALL / CALL / LCALL / RCALL / FCALL / VCALL), with the
 *   REG variant for calli and the MEMBASE variant for virtual calls.
 *   Enums recurse on their base type; generic insts recurse on the
 *   container class.  Aborts on unknown types.
 */
2141 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2144 type = mini_get_underlying_type (cfg, type);
2145 switch (type->type) {
2146 case MONO_TYPE_VOID:
2147 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2150 case MONO_TYPE_BOOLEAN:
2153 case MONO_TYPE_CHAR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 case MONO_TYPE_FNPTR:
2161 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2162 case MONO_TYPE_CLASS:
2163 case MONO_TYPE_STRING:
2164 case MONO_TYPE_OBJECT:
2165 case MONO_TYPE_SZARRAY:
2166 case MONO_TYPE_ARRAY:
2167 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2170 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2173 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2175 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2177 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2178 case MONO_TYPE_VALUETYPE:
2179 if (type->data.klass->enumtype) {
2180 type = mono_class_enum_basetype (type->data.klass);
2183 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 case MONO_TYPE_TYPEDBYREF:
2185 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2186 case MONO_TYPE_GENERICINST:
2187 type = &type->data.generic_class->container_class->byval_arg;
2190 case MONO_TYPE_MVAR:
2192 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2194 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2200 * target_type_is_incompatible:
2201 * @cfg: MonoCompile context
2203 * Check that the item @arg on the evaluation stack can be stored
2204 * in the target type (can be a local, or field, etc).
2205 * The cfg arg can be used to check if we need verification or just
2208 * Returns: non-0 value if arg can't be stored on a target.
2211 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2213 MonoType *simple_type;
/* byref targets: only managed/unmanaged pointers qualify. */
2216 if (target->byref) {
2217 /* FIXME: check that the pointed to types match */
2218 if (arg->type == STACK_MP)
2219 return arg->klass != mono_class_from_mono_type (target);
2220 if (arg->type == STACK_PTR)
2225 simple_type = mini_get_underlying_type (cfg, target);
2226 switch (simple_type->type) {
2227 case MONO_TYPE_VOID:
2231 case MONO_TYPE_BOOLEAN:
2234 case MONO_TYPE_CHAR:
2237 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2241 /* STACK_MP is needed when setting pinned locals */
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_FNPTR:
2249 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2250 * in native int. (#688008).
2252 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2255 case MONO_TYPE_CLASS:
2256 case MONO_TYPE_STRING:
2257 case MONO_TYPE_OBJECT:
2258 case MONO_TYPE_SZARRAY:
2259 case MONO_TYPE_ARRAY:
2260 if (arg->type != STACK_OBJ)
2262 /* FIXME: check type compatibility */
2266 if (arg->type != STACK_I8)
2270 if (arg->type != cfg->r4_stack_type)
2274 if (arg->type != STACK_R8)
/* Value types additionally require an exact class match. */
2277 case MONO_TYPE_VALUETYPE:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_TYPEDBYREF:
2285 if (arg->type != STACK_VTYPE)
2287 klass = mono_class_from_mono_type (simple_type);
2288 if (klass != arg->klass)
2291 case MONO_TYPE_GENERICINST:
2292 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2293 if (arg->type != STACK_VTYPE)
2295 klass = mono_class_from_mono_type (simple_type);
2296 if (klass != arg->klass)
2300 if (arg->type != STACK_OBJ)
2302 /* FIXME: check type compatibility */
/* Type variables under generic sharing: vtype or reference depending
 * on whether the var is constrained to be a value type. */
2306 case MONO_TYPE_MVAR:
2307 g_assert (cfg->generic_sharing_context);
2308 if (mini_type_var_is_vt (cfg, simple_type)) {
2309 if (arg->type != STACK_VTYPE)
2312 if (arg->type != STACK_OBJ)
2317 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2323 * Prepare arguments for passing to a function call.
2324 * Return a non-zero value if the arguments can't be passed to the given
2326 * The type checks are not yet complete and some conversions may need
2327 * casts on 32 or 64 bit architectures.
2329 * FIXME: implement this using target_type_is_incompatible ()
2332 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2334 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed or unmanaged pointer. */
2338 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2342 for (i = 0; i < sig->param_count; ++i) {
2343 if (sig->params [i]->byref) {
2344 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2348 simple_type = sig->params [i];
2349 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2351 switch (simple_type->type) {
2352 case MONO_TYPE_VOID:
2357 case MONO_TYPE_BOOLEAN:
2360 case MONO_TYPE_CHAR:
2363 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2369 case MONO_TYPE_FNPTR:
2370 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2373 case MONO_TYPE_CLASS:
2374 case MONO_TYPE_STRING:
2375 case MONO_TYPE_OBJECT:
2376 case MONO_TYPE_SZARRAY:
2377 case MONO_TYPE_ARRAY:
2378 if (args [i]->type != STACK_OBJ)
2383 if (args [i]->type != STACK_I8)
2387 if (args [i]->type != cfg->r4_stack_type)
2391 if (args [i]->type != STACK_R8)
2394 case MONO_TYPE_VALUETYPE:
2395 if (simple_type->data.klass->enumtype) {
2396 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2399 if (args [i]->type != STACK_VTYPE)
2402 case MONO_TYPE_TYPEDBYREF:
2403 if (args [i]->type != STACK_VTYPE)
2406 case MONO_TYPE_GENERICINST:
2407 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2410 case MONO_TYPE_MVAR:
2412 if (args [i]->type != STACK_VTYPE)
2416 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual) opcode to its direct-call counterpart.
 *   Aborts on anything that is not a MEMBASE call opcode.
 */
2424 callvirt_to_call (int opcode)
2427 case OP_CALL_MEMBASE:
2429 case OP_VOIDCALL_MEMBASE:
2431 case OP_FCALL_MEMBASE:
2433 case OP_RCALL_MEMBASE:
2435 case OP_VCALL_MEMBASE:
2437 case OP_LCALL_MEMBASE:
2440 g_assert_not_reached ();
2446 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either the supplied imt_arg register or
 *   a method constant) and attach it to 'call'.  Under LLVM the register
 *   is recorded in call->imt_arg_reg; otherwise it is bound to
 *   MONO_ARCH_IMT_REG when the architecture defines one, falling back to
 *   mono_arch_emit_imt_argument ().
 */
2448 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2452 if (COMPILE_LLVM (cfg)) {
2453 method_reg = alloc_preg (cfg);
2456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2457 } else if (cfg->compile_aot) {
2458 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2461 MONO_INST_NEW (cfg, ins, OP_PCONST);
2462 ins->inst_p0 = method;
2463 ins->dreg = method_reg;
2464 MONO_ADD_INS (cfg->cbb, ins);
2468 call->imt_arg_reg = method_reg;
2470 #ifdef MONO_ARCH_IMT_REG
2471 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2473 /* Need this to keep the IMT arg alive */
2474 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2479 #ifdef MONO_ARCH_IMT_REG
2480 method_reg = alloc_preg (cfg);
2483 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2484 } else if (cfg->compile_aot) {
2485 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2488 MONO_INST_NEW (cfg, ins, OP_PCONST);
2489 ins->inst_p0 = method;
2490 ins->dreg = method_reg;
2491 MONO_ADD_INS (cfg->cbb, ins);
2494 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2496 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill
 * in its target.  NOTE(review): the assignments of IP and TYPE, and the
 * return statement, are elided in this excerpt.
 */
2500 static MonoJumpInfo *
2501 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2503 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2507 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Return the generic-context usage flags for KLASS, but only when the
 * compile is using generic sharing; otherwise the (elided) fallback path
 * presumably returns 0 — confirm in the full source.
 */
2513 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2515 if (cfg->generic_sharing_context)
2516 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Method counterpart of mini_class_check_context_used (): forward to
 * mono_method_check_context_used () only under generic sharing; the
 * (elided) fallback presumably returns 0 — confirm in the full source.
 */
2522 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2524 if (cfg->generic_sharing_context)
2525 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 *
 *   Decide whether a call to CMETHOD must pass a class vtable and/or a
 * method runtime generic context (mrgctx) as a hidden argument.  Static
 * methods and valuetype methods on generic classes may need the vtable;
 * methods with their own method instantiation need an mrgctx instead (the
 * two are mutually exclusive — see the assert below).  Results are
 * returned through the optional out parameters.
 */
2531 * check_method_sharing:
2533 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2536 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2538 gboolean pass_vtable = FALSE;
2539 gboolean pass_mrgctx = FALSE;
/* Vtable case: static or valuetype method on a generic class */
2541 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2542 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2543 gboolean sharable = FALSE;
2545 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2548 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2549 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2550 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2552 sharable = sharing_enabled && context_sharable;
2556 * Pass vtable iff target method might
2557 * be shared, which means that sharing
2558 * is enabled for its class and its
2559 * context is sharable (and it's not a
/* ... but never for a method with its own method instantiation */
2562 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Mrgctx case: the method has a method instantiation of its own */
2566 if (mini_method_get_context (cmethod) &&
2567 mini_method_get_context (cmethod)->method_inst) {
2568 g_assert (!pass_vtable);
2570 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2573 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2574 MonoGenericContext *context = mini_method_get_context (cmethod);
2575 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2577 if (sharing_enabled && context_sharable)
/* gsharedvt calls also need the mrgctx */
2579 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2584 if (out_pass_vtable)
2585 *out_pass_vtable = pass_vtable;
2586 if (out_pass_mrgctx)
2587 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG and arguments
 * ARGS, picking the call opcode from the return type (or OP_TAILCALL for
 * tail calls), wiring up valuetype-return storage, converting R4 arguments
 * on soft-float targets, and letting the backend (or LLVM) emit the
 * argument moves.  CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the
 * flavor of call.  The instruction is created but not yet added to a
 * basic block — callers do that.
 * NOTE(review): many lines (declarations, some branches, the return) are
 * elided in this excerpt.
 */
2590 inline static MonoCallInst *
2591 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2592 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2596 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Profiler leave event must fire before a tail call replaces the frame */
2601 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2603 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2605 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2608 call->signature = sig;
2609 call->rgctx_reg = rgctx;
2610 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2612 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp */
2615 if (mini_type_is_vtype (cfg, sig_ret)) {
2616 call->vret_var = cfg->vret_addr;
2617 //g_assert_not_reached ();
2619 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2620 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2623 temp->backend.is_pinvoke = sig->pinvoke;
2626 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2627 * address of return value to increase optimization opportunities.
2628 * Before vtype decomposition, the dreg of the call ins itself represents the
2629 * fact the call modifies the return value. After decomposition, the call will
2630 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2631 * will be transformed into an LDADDR.
2633 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2634 loada->dreg = alloc_preg (cfg);
2635 loada->inst_p0 = temp;
2636 /* We reference the call too since call->dreg could change during optimization */
2637 loada->inst_p1 = call;
2638 MONO_ADD_INS (cfg->cbb, loada);
2640 call->inst.dreg = temp->dreg;
2642 call->vret_var = loada;
2643 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2644 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2646 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2647 if (COMPILE_SOFT_FLOAT (cfg)) {
2649 * If the call has a float argument, we would need to do an r8->r4 conversion using
2650 * an icall, but that cannot be done during the call sequence since it would clobber
2651 * the call registers + the stack. So we do it before emitting the call.
2653 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2655 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; it is never an R4 param */
2657 if (i >= sig->hasthis)
2658 t = sig->params [i - sig->hasthis];
2660 t = &mono_defaults.int_class->byval_arg;
2661 t = mono_type_get_underlying_type (t);
2663 if (!t->byref && t->type == MONO_TYPE_R4) {
2664 MonoInst *iargs [1];
2668 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2670 /* The result will be in an int vreg */
2671 call->args [i] = conv;
2677 call->need_unbox_trampoline = unbox_trampoline;
2680 if (COMPILE_LLVM (cfg))
2681 mono_llvm_emit_call (cfg, call);
2683 mono_arch_emit_call (cfg, call);
2685 mono_arch_emit_call (cfg, call);
/* Track the worst-case outgoing argument area for the prolog */
2688 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2689 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value in RGCTX_REG to CALL.  On architectures with a
 * dedicated rgctx register it is passed there; otherwise only
 * call->rgctx_arg_reg is recorded (the elided #else branch).
 */
2695 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2697 #ifdef MONO_ARCH_RGCTX_REG
2698 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2699 cfg->uses_rgctx_reg = TRUE;
2700 call->rgctx_reg = TRUE;
2702 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG.  Optionally
 * passes an IMT argument and/or an rgctx argument.  When
 * cfg->check_pinvoke_callconv is set and we are in a pinvoke wrapper, the
 * stack pointer is saved before the call and compared after it, throwing
 * ExecutionEngineException on a calling-convention mismatch.
 * Returns the call instruction (as a MonoInst*), already added to the
 * current basic block.
 */
2709 inline static MonoInst*
2710 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2715 gboolean check_sp = FALSE;
2717 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2718 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2720 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg so it survives until the call */
2725 rgctx_reg = mono_alloc_preg (cfg);
2726 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2730 if (!cfg->stack_inbalance_var)
2731 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call */
2733 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 ins->dreg = cfg->stack_inbalance_var->dreg;
2735 MONO_ADD_INS (cfg->cbb, ins);
2738 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2740 call->inst.sreg1 = addr->dreg;
2743 emit_imt_argument (cfg, call, NULL, imt_arg);
2745 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare SP after the call against the saved value */
2750 sp_reg = mono_alloc_preg (cfg);
2752 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2754 MONO_ADD_INS (cfg->cbb, ins);
2756 /* Restore the stack so we don't crash when throwing the exception */
2757 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2758 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2759 MONO_ADD_INS (cfg->cbb, ins);
2761 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2762 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2766 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2768 return (MonoInst*)call;
2772 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2775 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2777 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS
 * being non-NULL makes the call virtual.  Handles, in order: remoting
 * proxies (wrapping the target in a remoting-invoke-with-check wrapper,
 * or dispatching through the rgctx under generic sharing), string ctors
 * (fixing up the signature's return type), the delegate Invoke fast path
 * (calling through delegate->invoke_impl), devirtualization of
 * non-virtual/sealed targets, and finally vtable/IMT dispatch.  Returns
 * the call instruction, already added to the current basic block.
 * NOTE(review): many lines (declarations, some braces and else branches)
 * are elided in this excerpt.
 */
2780 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2781 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2783 #ifndef DISABLE_REMOTING
2784 gboolean might_be_remote = FALSE;
2786 gboolean virtual = this != NULL;
2787 gboolean enable_for_aot = TRUE;
2791 gboolean need_unbox_trampoline;
2794 sig = mono_method_signature (method);
/* Keep the rgctx value alive in its own vreg until the call */
2797 rgctx_reg = mono_alloc_preg (cfg);
2798 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2801 if (method->string_ctor) {
2802 /* Create the real signature */
2803 /* FIXME: Cache these */
2804 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2805 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2810 context_used = mini_method_check_context_used (cfg, method);
2812 #ifndef DISABLE_REMOTING
2813 might_be_remote = this && sig->hasthis &&
2814 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2815 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2817 if (might_be_remote && context_used) {
2820 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting wrapper address from the rgctx */
2822 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2824 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2828 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2830 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2832 #ifndef DISABLE_REMOTING
2833 if (might_be_remote)
2834 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2837 call->method = method;
2838 call->inst.flags |= MONO_INST_HAS_METHOD;
2839 call->inst.inst_left = this;
2840 call->tail_call = tail;
2843 int vtable_reg, slot_reg, this_reg;
2846 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: call through invoke_impl */
2848 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2849 MonoInst *dummy_use;
2851 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2853 /* Make a call to delegate->invoke_impl */
2854 call->inst.inst_basereg = this_reg;
2855 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2856 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2858 /* We must emit a dummy use here because the delegate trampoline will
2859 replace the 'this' argument with the delegate target making this activation
2860 no longer a root for the delegate.
2861 This is an issue for delegates that target collectible code such as dynamic
2862 methods of GC'able assemblies.
2864 For a test case look into #667921.
2866 FIXME: a dummy use is not the best way to do it as the local register allocator
2867 will put it on a caller save register and spil it around the call.
2868 Ideally, we would either put it on a callee save register or only do the store part.
2870 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2872 return (MonoInst*)call;
/* Devirtualize when the target is not virtual (or is final) */
2875 if ((!cfg->compile_aot || enable_for_aot) &&
2876 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2877 (MONO_METHOD_IS_FINAL (method) &&
2878 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2879 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2881 * the method is not virtual, we just need to ensure this is not null
2882 * and then we can call the method directly.
2884 #ifndef DISABLE_REMOTING
2885 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2887 * The check above ensures method is not gshared, this is needed since
2888 * gshared methods can't have wrappers.
2890 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2894 if (!method->string_ctor)
2895 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2897 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2898 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2900 * the method is virtual, but we can statically dispatch since either
2901 * it's class or the method itself are sealed.
2902 * But first we need to ensure it's not a null reference.
2904 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2906 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and pick a slot */
2908 vtable_reg = alloc_preg (cfg);
2909 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2910 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: dispatch through the IMT (slots sit below the vtable) */
2913 guint32 imt_slot = mono_method_get_imt_slot (method);
2914 emit_imt_argument (cfg, call, call->method, imt_arg);
2915 slot_reg = vtable_reg;
2916 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2918 if (slot_reg == -1) {
2919 slot_reg = alloc_preg (cfg);
2920 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2921 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class call: index directly into the vtable */
2924 slot_reg = vtable_reg;
2925 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2926 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2928 g_assert (mono_method_signature (method)->generic_param_count);
2929 emit_imt_argument (cfg, call, call->method, imt_arg);
2933 call->inst.sreg1 = slot_reg;
2934 call->inst.inst_offset = offset;
2935 call->virtual = TRUE;
2939 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2942 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2944 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a non-tail call to METHOD with its own
 * signature and no IMT/rgctx arguments.
 */
2948 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2950 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct (non-virtual, non-tail) call to the native function
 * FUNC with signature SIG, add it to the current basic block and return
 * it.  NOTE(review): the line storing FUNC into the call (and any
 * declarations) is elided in this excerpt.
 */
2954 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2961 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2964 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2966 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for FUNC: look up its
 * MonoJitICallInfo by address and call through the icall wrapper with the
 * icall's signature.
 */
2970 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2972 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2976 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *
 *   Emit a call whose target is only known at patch time: a MonoJumpInfo
 * describing PATCH_TYPE/DATA is used as the call address and registered
 * in cfg->abs_patches so the PATCH_INFO_ABS resolver can find it later.
 * NOTE(review): the trailing `return ins;` is elided in this excerpt.
 */
2980 * mono_emit_abs_call:
2982 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2984 inline static MonoInst*
2985 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2986 MonoMethodSignature *sig, MonoInst **args)
2988 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2992 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table; keys and values are the ji itself */
2995 if (cfg->abs_patches == NULL)
2996 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2997 g_hash_table_insert (cfg->abs_patches, ji, ji);
2998 ins = mono_emit_native_call (cfg, ji, sig, args);
2999 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO.  If the icall cannot
 * raise (info->no_raise) and we are not in a configuration that needs the
 * wrapper anyway (AOT debugging, LLVM on 64-bit, seq points), inline the
 * icall wrapper instead of calling it, saving the wrapper frame.
 * OUT_CBB receives the basic block the inline ends in.
 * NOTE(review): several lines (declarations, some branches, the
 * post-inline return-value handling) are elided in this excerpt.
 */
3004 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3006 gboolean no_wrapper = FALSE;
3009 * Call the jit icall without a wrapper if possible.
3010 * The wrapper is needed for the following reasons:
3011 * - to handle exceptions thrown using mono_raise_exceptions () from the
3012 * icall function. The EH code needs the lmf frame pushed by the
3013 * wrapper to be able to unwind back to managed code.
3014 * - to be able to do stack walks for asynchronously suspended
3015 * threads when debugging.
3017 if (info->no_raise) {
3018 if (cfg->compile_aot) {
3019 // FIXME: This might be loaded into a runtime during debugging
3020 // even if it is not compiled using 'soft-debug'.
3023 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3024 if ((cfg->compile_llvm && SIZEOF_VOID_P == 8) || cfg->gen_seq_points_debug_data)
/* Lazily create and cache the icall wrapper method */
3033 if (!info->wrapper_method) {
3034 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3035 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it */
3037 mono_memory_barrier ();
3041 * Inline the wrapper method, which is basically a call to the C icall, and
3042 * an exception check.
3044 costs = inline_method (cfg, info->wrapper_method, NULL,
3045 args, NULL, cfg->real_offset, TRUE, out_cbb);
3046 g_assert (costs > 0);
3047 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: ordinary call through the wrapper */
3051 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee
 * may return a sub-register-sized integer with uninitialized upper bits
 * (pinvoke callees, or anything when compiling with LLVM).  Emits the
 * appropriate ICONV and returns the widened value; the (elided) fallthrough
 * presumably returns INS unchanged — confirm in the full source.
 */
3056 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3058 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3059 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3063 * Native code might return non register sized integers
3064 * without initializing the upper bits.
/* Choose the widening conversion from the return type's load opcode */
3066 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3067 case OP_LOADI1_MEMBASE:
3068 widen_op = OP_ICONV_TO_I1;
3070 case OP_LOADU1_MEMBASE:
3071 widen_op = OP_ICONV_TO_U1;
3073 case OP_LOADI2_MEMBASE:
3074 widen_op = OP_ICONV_TO_I2;
3076 case OP_LOADU2_MEMBASE:
3077 widen_op = OP_ICONV_TO_U2;
3083 if (widen_op != -1) {
3084 int dreg = alloc_preg (cfg);
3087 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3088 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3) helper from corlib, caching it in
 * a static on first use.  Aborts if corlib is too old to have it.
 */
3098 get_memcpy_method (void)
3100 static MonoMethod *memcpy_method = NULL;
3101 if (!memcpy_method) {
3102 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3104 g_error ("Old corlib found. Install a new one");
3106 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap (*WB_BITMAP) with one bit per pointer-sized
 * slot of KLASS (starting at OFFSET) marking the slots that hold managed
 * references and therefore need a write barrier.  Static fields are
 * skipped; valuetype field offsets are rebased past the MonoObject header;
 * nested structs with references recurse.
 */
3110 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3112 MonoClassField *field;
3113 gpointer iter = NULL;
3115 while ((field = mono_class_get_fields (klass, &iter))) {
3118 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3120 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3121 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the per-slot bitmap */
3122 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3123 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3125 MonoClass *field_class = mono_class_from_mono_type (field->type);
3126 if (field_class->has_references)
3127 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.  No-op when
 * write barriers are disabled.  Prefers, in order: the architecture's
 * inline card-table barrier opcode, an open-coded card-table mark
 * (shift + optional mask + store of 1 into the card byte), and finally a
 * call to the runtime's write-barrier method.  Ends with a dummy use so
 * VALUE stays alive across the barrier.
 */
3133 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3135 int card_table_shift_bits;
3136 gpointer card_table_mask;
3138 MonoInst *dummy_use;
3139 int nursery_shift_bits;
3140 size_t nursery_size;
3141 gboolean has_card_table_wb = FALSE;
3143 if (!cfg->gen_write_barriers)
3146 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3148 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3150 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3151 has_card_table_wb = TRUE;
3154 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* Single opcode; the backend expands it to the card mark */
3157 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3158 wbarrier->sreg1 = ptr->dreg;
3159 wbarrier->sreg2 = value->dreg;
3160 MONO_ADD_INS (cfg->cbb, wbarrier);
3161 } else if (card_table) {
3162 int offset_reg = alloc_preg (cfg);
3163 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked */
3166 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3167 if (card_table_mask)
3168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3170 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3171 * IMM's larger than 32bits.
3173 if (cfg->compile_aot) {
3174 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3176 MONO_INST_NEW (cfg, ins, OP_PCONST);
3177 ins->inst_p0 = card_table;
3178 ins->dreg = card_reg;
3179 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1 */
3182 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3183 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* No card table available: call the runtime barrier */
3185 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3186 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3189 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a KLASS value of
 * SIZE bytes from iargs[1] to iargs[0].  Bails out (elided early returns,
 * presumably FALSE) when ALIGN is below pointer size or SIZE exceeds the
 * 32-slot bitmap limit; for > 5 pointer-sized stores it instead calls the
 * mono_gc_wbarrier_value_copy_bitmap icall with the computed bitmap.
 * Otherwise it unrolls pointer-sized loads/stores, emitting a write
 * barrier for each slot flagged in the bitmap, then copies the 4/2/1-byte
 * tail (which cannot contain references).
 * NOTE(review): return statements and some branch lines are elided in
 * this excerpt.
 */
3193 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3195 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3196 unsigned need_wb = 0;
3201 /*types with references can't have alignment smaller than sizeof(void*) */
3202 if (align < SIZEOF_VOID_P)
3205 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3206 if (size > 32 * SIZEOF_VOID_P)
3209 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3211 /* We don't unroll more than 5 stores to avoid code bloat. */
3212 if (size > 5 * SIZEOF_VOID_P) {
3213 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3214 size += (SIZEOF_VOID_P - 1);
3215 size &= ~(SIZEOF_VOID_P - 1);
3217 EMIT_NEW_ICONST (cfg, iargs [2], size);
3218 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3219 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3223 destreg = iargs [0]->dreg;
3224 srcreg = iargs [1]->dreg;
3227 dest_ptr_reg = alloc_preg (cfg);
3228 tmp_reg = alloc_preg (cfg);
/* Walk the destination with its own pointer so iargs[0] can track it */
3231 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3233 while (size >= SIZEOF_VOID_P) {
3234 MonoInst *load_inst;
3235 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3236 load_inst->dreg = tmp_reg;
3237 load_inst->inst_basereg = srcreg;
3238 load_inst->inst_offset = offset;
3239 MONO_ADD_INS (cfg->cbb, load_inst);
3241 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap flags as references */
3244 emit_write_barrier (cfg, iargs [0], load_inst);
3246 offset += SIZEOF_VOID_P;
3247 size -= SIZEOF_VOID_P;
3250 /*tmp += sizeof (void*)*/
3251 if (size >= SIZEOF_VOID_P) {
3252 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3253 MONO_ADD_INS (cfg->cbb, iargs [0]);
3257 /* Those cannot be references since size < sizeof (void*) */
3259 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3260 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3266 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3273 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3274 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *
 *   Copy a valuetype of type KLASS from *SRC->dreg to *DEST->dreg.
 * Under gsharedvt the size and memcpy routine come from the rgctx.
 * When write barriers are needed (the struct has references and the store
 * is not to the stack) it uses the wb-aware inline copy or a value_copy
 * icall; otherwise small copies are inlined with mini_emit_memcpy () and
 * larger ones call the managed memcpy helper.
 * NOTE(review): some branch/return lines are elided in this excerpt.
 */
3283 * Emit code to copy a valuetype of type @klass whose address is stored in
3284 * @src->dreg to memory whose address is stored at @dest->dreg.
3287 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3289 MonoInst *iargs [4];
3290 int context_used, n;
3292 MonoMethod *memcpy_method;
3293 MonoInst *size_ins = NULL;
3294 MonoInst *memcpy_ins = NULL;
3298 * This check breaks with spilled vars... need to handle it during verification anyway.
3299 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are runtime values fetched from the rgctx */
3302 if (mini_is_gsharedvt_klass (cfg, klass)) {
3304 context_used = mini_class_check_context_used (cfg, klass);
3305 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3306 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3310 n = mono_class_native_size (klass, &align);
3312 n = mono_class_value_size (klass, &align);
3314 /* if native is true there should be no references in the struct */
3315 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3316 /* Avoid barriers when storing to the stack */
3317 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3318 (dest->opcode == OP_LDADDR))) {
3324 context_used = mini_class_check_context_used (cfg, klass);
3326 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3327 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3329 } else if (context_used) {
3330 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3332 if (cfg->compile_aot) {
3333 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3335 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* value_copy needs the GC descriptor of klass */
3336 mono_class_compute_gc_descriptor (klass);
3341 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3343 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: plain memory copy */
3348 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3349 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3350 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3355 iargs [2] = size_ins;
3357 EMIT_NEW_ICONST (cfg, iargs [2], n);
3359 memcpy_method = get_memcpy_method ();
3361 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3363 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3) helper from corlib, caching it in
 * a static on first use.  Aborts if corlib is too old to have it.
 */
3368 get_memset_method (void)
3370 static MonoMethod *memset_method = NULL;
3371 if (!memset_method) {
3372 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3374 g_error ("Old corlib found. Install a new one");
3376 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Zero-initialize the KLASS value at *DEST->dreg (CIL initobj).
 * Under gsharedvt the size and a bzero routine come from the rgctx and a
 * corlib bzero helper is called indirectly; otherwise small values are
 * zeroed inline with mini_emit_memset () and larger ones via the managed
 * memset helper.
 * NOTE(review): some lines (iargs[0] setup, early returns) are elided in
 * this excerpt.
 */
3380 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3382 MonoInst *iargs [3];
3383 int n, context_used;
3385 MonoMethod *memset_method;
3386 MonoInst *size_ins = NULL;
3387 MonoInst *bzero_ins = NULL;
3388 static MonoMethod *bzero_method;
3390 /* FIXME: Optimize this for the case when dest is an LDADDR */
3392 mono_class_init (klass);
3393 if (mini_is_gsharedvt_klass (cfg, klass)) {
3394 context_used = mini_class_check_context_used (cfg, klass);
3395 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3396 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3398 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3399 g_assert (bzero_method);
3401 iargs [1] = size_ins;
3402 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3406 n = mono_class_value_size (klass, &align);
/* Small enough: inline the zeroing */
3408 if (n <= sizeof (gpointer) * 8) {
3409 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3412 memset_method = get_memset_method ();
3414 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3415 EMIT_NEW_ICONST (cfg, iargs [2], n);
3416 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  Depending
 * on CONTEXT_USED and the method kind, the rgctx comes from: the saved
 * mrgctx variable (method instantiations), the saved vtable variable
 * (static/valuetype methods, possibly indirecting through the mrgctx's
 * class_vtable), or the vtable loaded from the 'this' argument.
 * NOTE(review): several lines (returns, some else branches) are elided in
 * this excerpt.
 */
3421 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3423 MonoInst *this = NULL;
3425 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when it exists and we don't have an mrgctx */
3427 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3428 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3429 !method->klass->valuetype)
3430 EMIT_NEW_ARGLOAD (cfg, this, 0);
3432 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3433 MonoInst *mrgctx_loc, *mrgctx_var;
3436 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3438 mrgctx_loc = mono_get_vtable_var (cfg);
3439 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3442 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3443 MonoInst *vtable_loc, *vtable_var;
3447 vtable_loc = mono_get_vtable_var (cfg);
3448 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3450 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3451 MonoInst *mrgctx_var = vtable_var;
/* The var holds an mrgctx: load its class_vtable field */
3454 vtable_reg = alloc_preg (cfg);
3455 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3456 vtable_var->type = STACK_PTR;
/* Instance method: read the vtable out of 'this' */
3464 vtable_reg = alloc_preg (cfg);
3465 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) and fill an rgctx-entry patch descriptor:
 * which method's context it belongs to, whether the lookup goes through an
 * mrgctx, the embedded patch (PATCH_TYPE/PATCH_DATA) identifying the item,
 * and the requested INFO_TYPE.  NOTE(review): the return statement is
 * elided in this excerpt.
 */
3470 static MonoJumpInfoRgctxEntry *
3471 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3473 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3474 res->method = method;
3475 res->in_mrgctx = in_mrgctx;
3476 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3477 res->data->type = patch_type;
3478 res->data->data.target = patch_data;
3479 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline (via an abs patch of
 * type RGCTX_FETCH) that resolves ENTRY using the rgctx value RGCTX.
 */
3484 static inline MonoInst*
3485 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3487 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for KLASS from the runtime
 * generic context of the current method.
 */
3491 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3492 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3494 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3495 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3497 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for signature SIG from the
 * runtime generic context of the current method.
 */
3501 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3502 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3504 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3505 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3507 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for a gsharedvt call
 * described by (SIG, CMETHOD) from the runtime generic context of the
 * current method.
 */
3511 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3512 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3514 MonoJumpInfoGSharedVtCall *call_info;
3515 MonoJumpInfoRgctxEntry *entry;
3518 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3519 call_info->sig = sig;
3520 call_info->method = cmethod;
3522 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3523 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3525 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR that fetches the gsharedvt method info (INFO) for CMETHOD
 * from the runtime generic context of the current method.
 */
3530 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3531 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3533 MonoJumpInfoRgctxEntry *entry;
3536 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3537 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3539 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Load the RGCTX_TYPE item for CMETHOD: when CONTEXT_USED is 0 emit an
 * appropriate constant (method const or method-rgctx const) directly,
 * otherwise fetch it from the runtime generic context.
 */
3543 * emit_get_rgctx_method:
3545 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3546 * normal constants, else emit a load from the rgctx.
3549 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3550 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3552 if (!context_used) {
3555 switch (rgctx_type) {
3556 case MONO_RGCTX_INFO_METHOD:
3557 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3559 case MONO_RGCTX_INFO_METHOD_RGCTX:
3560 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3563 g_assert_not_reached ();
3566 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3567 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3569 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for FIELD from the runtime
 * generic context of the current method.
 */
3574 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3575 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3577 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3578 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3580 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template table, adding a new entry if none exists yet.
 * LOCAL_OFFSET entries are never deduplicated (each local gets its own slot).
 * NOTE(review): 'template' is a valid C identifier but a C++ keyword — would
 * break if this file were ever compiled as C++.
 */
3584 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3586 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3587 MonoRuntimeGenericContextInfoTemplate *template;
/* First, look for an existing matching entry so slots are shared. */
3592 for (i = 0; i < info->num_entries; ++i) {
3593 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3595 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table geometrically (x2, starting at 16) when it is full.
 * The old array is mempool-allocated, so it is simply abandoned. */
3599 if (info->num_entries == info->count_entries) {
3600 MonoRuntimeGenericContextInfoTemplate *new_entries;
3601 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3603 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3605 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3606 info->entries = new_entries;
3607 info->count_entries = new_count_entries;
/* Append the new entry and return its index (returned on an elided line). */
3610 idx = info->num_entries;
3611 template = &info->entries [idx];
3612 template->info_type = rgctx_type;
3613 template->data = data;
3615 info->num_entries ++;
3621 * emit_get_gsharedvt_info:
3623 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3626 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Map (data, rgctx_type) to a slot index in the runtime info table. */
3631 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3632 /* Load info->entries [idx] */
3633 dreg = alloc_preg (cfg);
/* Single pointer-sized load relative to the gsharedvt info variable; no
 * trampoline call is needed because the table was filled at method entry. */
3634 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: look up gsharedvt info for a class by passing its
 * byval MonoType as the data key.
 */
3640 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3642 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3646 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) trampoline for KLASS.
 * In shared code the vtable argument is fetched from the rgctx; otherwise a
 * vtable constant is emitted directly.
 */
3649 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3651 MonoInst *vtable_arg;
3655 context_used = mini_class_check_context_used (cfg, klass);
/* Shared path (branch structure partially elided in this listing). */
3658 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3659 klass, MONO_RGCTX_INFO_VTABLE);
3661 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3665 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature (argument passed normally
 * instead of in MONO_ARCH_VTABLE_REG). */
3668 if (COMPILE_LLVM (cfg))
3669 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3671 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3672 #ifdef MONO_ARCH_VTABLE_REG
/* On architectures with a dedicated vtable register, pin the argument. */
3673 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3674 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence-point instruction at IP for debugger support, but only
 * when sequence points are enabled and we are compiling METHOD itself (i.e.
 * not while inlining it into another method).
 * NOTE(review): the guard before setting MONO_INST_NONEMPTY_STACK is on an
 * elided line — presumably 'if (nonempty_stack)'; confirm in the file.
 */
3681 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3685 if (cfg->gen_seq_points && cfg->method == method) {
3686 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3688 ins->flags |= MONO_INST_NONEMPTY_STACK;
3689 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source and target
 * classes of a cast into the JIT TLS area (class_cast_from/class_cast_to), so
 * a failing cast can produce a detailed exception message.  When NULL_CHECK
 * is requested, the recording is skipped for a null object.  If OUT_BBLOCK is
 * non-NULL it receives the current basic block on return.
 */
3694 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3696 if (mini_get_debug_options ()->better_cast_details) {
3697 int vtable_reg = alloc_preg (cfg);
3698 int klass_reg = alloc_preg (cfg);
3699 MonoBasicBlock *is_null_bb = NULL;
3701 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely when the object is null (branch guard for
 * the null_check flag is on an elided line). */
3704 NEW_BBLOCK (cfg, is_null_bb);
3706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3707 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3710 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3712 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3716 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's dynamic class as the "from" side of the cast. */
3717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3718 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The "to" side: an rgctx lookup in shared code, a constant otherwise. */
3722 context_used = mini_class_check_context_used (cfg, klass);
3724 MonoInst *class_ins;
3726 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3727 to_klass_reg = class_ins->dreg;
3729 to_klass_reg = alloc_preg (cfg);
3730 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3735 MONO_START_BB (cfg, is_null_bb);
3737 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the saved cast information in
 * the JIT TLS area once the cast has succeeded.
 */
3743 reset_cast_details (MonoCompile *cfg)
3745 /* Reset the variables holding the cast details */
3746 if (mini_get_debug_options ()->better_cast_details) {
3747 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3749 MONO_ADD_INS (cfg->cbb, tls_get);
3750 /* It is enough to reset the from field */
3751 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3756 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is an instance
 * of exactly ARRAY_CLASS (used for covariant array stores).  The comparison
 * strategy depends on the compilation mode: class pointers under
 * MONO_OPT_SHARED, an rgctx vtable lookup in shared generic code, or a vtable
 * constant/immediate otherwise.
 */
3759 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3761 int vtable_reg = alloc_preg (cfg);
3764 context_used = mini_class_check_context_used (cfg, array_class);
3766 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3768 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code cannot burn a vtable pointer (it is per-domain), so
 * compare MonoClass pointers instead. */
3770 if (cfg->opt & MONO_OPT_SHARED) {
3771 int class_reg = alloc_preg (cfg);
3772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3773 if (cfg->compile_aot) {
3774 int klass_reg = alloc_preg (cfg);
3775 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3776 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic-shared code: fetch the expected vtable from the rgctx. */
3780 } else if (context_used) {
3781 MonoInst *vtable_ins;
3783 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3784 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3786 if (cfg->compile_aot) {
/* mono_class_vtable () failure bails out (early return on elided lines). */
3790 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3792 vt_reg = alloc_preg (cfg);
3793 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3794 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3797 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch from the comparisons above throws. */
3803 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3805 reset_cast_details (cfg);
3809 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3810 * generic code is generated.
3813 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing Nullable<T> is implemented by calling the managed
 * Nullable<T>.Unbox () method rather than with inline IR. */
3815 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3818 MonoInst *rgctx, *addr;
3820 /* FIXME: What if the class is shared? We might not
3821 have to get the address of the method from the
/* Shared path: indirect call through an rgctx-provided code address. */
3823 addr = emit_get_rgctx_method (cfg, context_used, method,
3824 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3826 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3828 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable if the callee needs it. */
3830 gboolean pass_vtable, pass_mrgctx;
3831 MonoInst *rgctx_arg = NULL;
3833 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3834 g_assert (!pass_mrgctx);
3837 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3840 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3843 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' opcode: verify that SP [0] is a boxed instance of
 * KLASS (throwing InvalidCastException otherwise) and produce a managed
 * pointer to the value payload (object address + sizeof (MonoObject)).
 */
3848 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3852 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3853 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3854 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3855 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3857 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3858 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3859 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3861 /* FIXME: generics */
3862 g_assert (klass->rank == 0);
/* A boxed value can never be an array, so rank must be 0. */
3865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3866 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3869 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic path: compare element classes via an rgctx lookup. */
3872 MonoInst *element_class;
3874 /* This assertion is from the unboxcast insn */
3875 g_assert (klass->rank == 0);
3877 element_class = emit_get_rgctx_klass (cfg, context_used,
3878 klass->element_class, MONO_RGCTX_INFO_KLASS);
3880 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3881 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast diagnostics recorded. */
3883 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3884 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3885 reset_cast_details (cfg);
/* The unboxed value lives immediately after the object header. */
3888 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3889 MONO_ADD_INS (cfg->cbb, add);
3890 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR for unbox.any when KLASS is a gsharedvt (generic-shared valuetype)
 * type whose concrete identity is only known at run time.  Dispatches on the
 * runtime CLASS_BOX_TYPE info: 1 = reference type, 2 = Nullable<T>, otherwise
 * a plain value type.  Returns the loaded value; *OUT_CBB receives the final
 * basic block.
 */
3897 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3899 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3900 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3904 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check: obj = castclass_unbox (obj, klass). */
3910 args [1] = klass_inst;
3913 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3915 NEW_BBLOCK (cfg, is_ref_bb);
3916 NEW_BBLOCK (cfg, is_nullable_bb);
3917 NEW_BBLOCK (cfg, end_bb);
3918 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3925 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3926 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Plain vtype: the payload sits right after the MonoObject header. */
3930 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3931 MONO_ADD_INS (cfg->cbb, addr);
3933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3936 MONO_START_BB (cfg, is_ref_bb);
3938 /* Save the ref to a temporary */
3939 dreg = alloc_ireg (cfg);
3940 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3941 addr->dreg = addr_reg;
3942 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3943 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3946 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable<T>: call Nullable<T>.Unbox () through a runtime-resolved address;
 * the signature is built by hand since T is not known at JIT time. */
3949 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3950 MonoInst *unbox_call;
3951 MonoMethodSignature *unbox_sig;
3954 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3956 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 unbox_sig->ret = &klass->byval_arg;
3958 unbox_sig->param_count = 1;
3959 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3960 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3962 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3963 addr->dreg = addr_reg;
3966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3969 MONO_START_BB (cfg, end_bb);
/* All three paths converge: load the value through addr_reg. */
3972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3974 *out_cbb = cfg->cbb;
3980 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates (but does not initialize) an object of KLASS,
 * choosing between managed allocators, specialized JIT icalls and the generic
 * mono_object_new* helpers depending on sharing mode, AOT and GC support.
 * FOR_BOX selects the boxing variant of the managed allocator.
 */
3983 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3985 MonoInst *iargs [2];
/* Shared-generic path (outer branch on context_used is elided here). */
3991 MonoInst *iargs [2];
3992 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3994 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED helpers take a MonoClass, the others take a MonoVTable. */
3996 if (cfg->opt & MONO_OPT_SHARED)
3997 rgctx_info = MONO_RGCTX_INFO_KLASS;
3999 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4000 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4002 if (cfg->opt & MONO_OPT_SHARED) {
4003 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4005 alloc_ftn = mono_object_new;
4008 alloc_ftn = mono_object_new_specific;
4011 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4012 if (known_instance_size)
4013 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4014 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4017 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4020 if (cfg->opt & MONO_OPT_SHARED) {
4021 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4022 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4024 alloc_ftn = mono_object_new;
4025 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4026 /* This happens often in argument checking code, eg. throw new FooException... */
4027 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4028 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4029 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4031 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4032 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: record a TypeLoadException on the cfg. */
4036 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4037 cfg->exception_ptr = klass;
4041 #ifndef MONO_CROSS_COMPILE
4042 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4045 if (managed_alloc) {
4046 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4047 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4048 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4050 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation helpers take the instance size in words ("lw"). */
4052 guint32 lw = vtable->klass->instance_size;
4053 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4054 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4055 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4058 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4062 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4066 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of type KLASS.  Nullable<T> is boxed by calling
 * the managed Nullable<T>.Box () method; gsharedvt types dispatch at run time
 * on CLASS_BOX_TYPE (1 = reference, 2 = nullable, else plain vtype); other
 * value types allocate via handle_alloc () and copy the payload.  *OUT_CBB
 * receives the final basic block.
 */
4069 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4071 MonoInst *alloc, *ins;
4073 *out_cbb = cfg->cbb;
4075 if (mono_class_is_nullable (klass)) {
4076 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4079 /* FIXME: What if the class is shared? We might not
4080 have to get the method address from the RGCTX. */
/* Shared path: indirect call through an rgctx-resolved code address. */
4081 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4082 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4083 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4085 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4087 gboolean pass_vtable, pass_mrgctx;
4088 MonoInst *rgctx_arg = NULL;
4090 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4091 g_assert (!pass_mrgctx);
4094 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4097 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4100 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box strategy is only known at run time. */
4104 if (mini_is_gsharedvt_klass (cfg, klass)) {
4105 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4106 MonoInst *res, *is_ref, *src_var, *addr;
4109 dreg = alloc_ireg (cfg);
4111 NEW_BBLOCK (cfg, is_ref_bb);
4112 NEW_BBLOCK (cfg, is_nullable_bb);
4113 NEW_BBLOCK (cfg, end_bb);
4114 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4119 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Plain vtype: allocate a box and store the value after the header. */
4122 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4125 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4126 ins->opcode = OP_STOREV_MEMBASE;
4128 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4129 res->type = STACK_OBJ;
4131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4134 MONO_START_BB (cfg, is_ref_bb);
4135 addr_reg = alloc_ireg (cfg);
4137 /* val is a vtype, so has to load the value manually */
4138 src_var = get_vreg_to_inst (cfg, val->dreg);
4140 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4141 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4146 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable<T>: call Nullable<T>.Box () with a hand-built signature, since
 * the instantiated method cannot be constructed at JIT time. */
4149 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4150 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4152 MonoMethodSignature *box_sig;
4155 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4156 * construct that method at JIT time, so have to do things by hand.
4158 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4159 box_sig->ret = &mono_defaults.object_class->byval_arg;
4160 box_sig->param_count = 1;
4161 box_sig->params [0] = &klass->byval_arg;
4162 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4163 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4164 res->type = STACK_OBJ;
4168 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4170 MONO_START_BB (cfg, end_bb);
4172 *out_cbb = cfg->cbb;
/* Ordinary (non-gsharedvt) value type boxing. */
4176 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4180 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return TRUE if KLASS is a generic instantiation (or, in shared code, an
 * open generic definition) with at least one covariant/contravariant type
 * parameter instantiated with a reference type.  Such casts need the
 * cache-based castclass/isinst helpers instead of simple vtable checks.
 */
4187 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4190 MonoGenericContainer *container;
4191 MonoGenericInst *ginst;
4193 if (klass->generic_class) {
4194 container = klass->generic_class->container_class->generic_container;
4195 ginst = klass->generic_class->context.class_inst;
4196 } else if (klass->generic_container && context_used) {
4197 container = klass->generic_container;
4198 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types. */
4203 for (i = 0; i < container->type_argc; ++i) {
4205 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4207 type = ginst->type_argv [i];
4208 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 * TRUE when an isinst/castclass against KLASS cannot be decided with a simple
 * class-pointer comparison: interfaces, arrays, Nullable<T>, MarshalByRef
 * types, sealed types and open type variables all need the slower cast paths.
 * Every use of the argument is parenthesized so the macro stays correct if a
 * non-trivial expression is ever passed (CERT PRE01-C); behavior is unchanged
 * for all current call sites, which pass a plain variable.
 */
4214 #define is_complex_isinst(klass) (((klass)->flags & TYPE_ATTRIBUTE_INTERFACE) || (klass)->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || ((klass)->flags & TYPE_ATTRIBUTE_SEALED) || (klass)->byval_arg.type == MONO_TYPE_VAR || (klass)->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache wrapper, which performs the cast
 * and memoizes the result in a per-callsite cache cell.  ARGS must already
 * hold the object, the target class and the cache pointer.  Cast diagnostics
 * are recorded around the call; *OUT_BBLOCK receives the current bblock.
 */
4217 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4219 MonoMethod *mono_castclass;
4222 mono_castclass = mono_marshal_get_castclass_with_cache ();
4224 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4225 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4226 reset_cast_details (cfg);
4227 *out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared front end for emit_castclass_with_cache (): build the argument
 * array with a class constant and a per-callsite cache cell.  Under AOT the
 * cache is referenced through a CASTCLASS_CACHE patch keyed by a unique
 * (method_index, counter) id; under JIT it is a freshly allocated domain
 * memory cell.
 */
4233 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4242 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4245 if (cfg->compile_aot) {
4246 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4247 cfg->castclass_cache_index ++;
4248 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4249 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4251 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4254 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4256 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4260 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode: cast SRC to KLASS or throw
 * InvalidCastException.  Strategy, in order: cache-based wrapper for variant
 * generic interfaces, an inlined marshal wrapper for interfaces/MarshalByRef
 * in non-shared code, the cache wrapper for other complex casts, and finally
 * inline vtable/class-pointer checks for the simple cases.  *OUT_BB receives
 * the final bblock and *INLINE_COSTS is incremented for inlined work.
 */
4263 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4265 MonoBasicBlock *is_null_bb;
4266 int obj_reg = src->dreg;
4267 int vtable_reg = alloc_preg (cfg);
4269 MonoInst *klass_inst = NULL, *res;
4270 MonoBasicBlock *bblock;
4274 context_used = mini_class_check_context_used (cfg, klass);
4276 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4277 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4278 (*inline_costs) += 2;
4281 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4282 MonoMethod *mono_castclass;
4283 MonoInst *iargs [1];
4286 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass wrapper body directly into this method. */
4289 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4290 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4291 iargs, ip, cfg->real_offset, TRUE, &bblock);
4292 reset_cast_details (cfg);
4293 CHECK_CFG_EXCEPTION;
4294 g_assert (costs > 0);
4296 cfg->real_offset += 5;
4298 (*inline_costs) += costs;
/* Shared-code complex casts go through the cache-based wrapper. */
4307 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4308 MonoInst *cache_ins;
4310 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4315 /* klass - it's the second element of the cache entry*/
4316 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4319 args [2] = cache_ins;
4321 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4324 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check path.  A null object always passes castclass. */
4327 NEW_BBLOCK (cfg, is_null_bb);
4329 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4330 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4332 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4334 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4336 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4338 int klass_reg = alloc_preg (cfg);
4340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit an exact class-pointer comparison. */
4342 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4343 /* the remoting code is broken, access the class for now */
4344 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4345 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4347 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4348 cfg->exception_ptr = klass;
4351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4354 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4356 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4359 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4363 MONO_START_BB (cfg, is_null_bb);
4365 reset_cast_details (cfg);
4376 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' opcode: the result register holds SRC if it is
 * an instance of KLASS, or NULL otherwise.  Complex casts (variant generics,
 * interfaces, arrays, nullable, marshalbyref, sealed, type variables) go
 * through the cache-based isinst wrapper; the remaining cases are checked
 * inline with class/rank/element-class comparisons.
 */
4379 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4382 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4383 int obj_reg = src->dreg;
4384 int vtable_reg = alloc_preg (cfg);
4385 int res_reg = alloc_ireg_ref (cfg);
4386 MonoInst *klass_inst = NULL;
4391 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4392 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4393 MonoInst *cache_ins;
4395 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4400 /* klass - it's the second element of the cache entry*/
4401 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4404 args [2] = cache_ins;
4406 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4409 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: is_null_bb doubles as the "success" target because the
 * result register is pre-loaded with the object below. */
4412 NEW_BBLOCK (cfg, is_null_bb);
4413 NEW_BBLOCK (cfg, false_bb);
4414 NEW_BBLOCK (cfg, end_bb);
4416 /* Do the assignment at the beginning, so the other assignment can be if converted */
4417 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4418 ins->type = STACK_OBJ;
/* isinst (null) yields null. */
4421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4424 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4426 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4427 g_assert (!context_used);
4428 /* the is_null_bb target simply copies the input register to the output */
4429 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4431 int klass_reg = alloc_preg (cfg);
/* Array path: compare ranks, then the element (cast) class, with special
 * cases for enums and their underlying types per ECMA-335 array variance. */
4434 int rank_reg = alloc_preg (cfg);
4435 int eclass_reg = alloc_preg (cfg);
4437 g_assert (!context_used);
4438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4443 if (klass->cast_class == mono_defaults.object_class) {
4444 int parent_reg = alloc_preg (cfg);
4445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4446 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4447 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4448 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4449 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4450 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4451 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4453 } else if (klass->cast_class == mono_defaults.enum_class) {
4454 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4456 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4457 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4459 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4460 /* Check that the object is a vector too */
4461 int bounds_reg = alloc_preg (cfg);
4462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4464 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4467 /* the is_null_bb target simply copies the input register to the output */
4468 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4470 } else if (mono_class_is_nullable (klass)) {
4471 g_assert (!context_used);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4473 /* the is_null_bb target simply copies the input register to the output */
4474 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4476 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4477 g_assert (!context_used);
4478 /* the remoting code is broken, access the class for now */
4479 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4480 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4482 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4483 cfg->exception_ptr = klass;
4486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4495 /* the is_null_bb target simply copies the input register to the output */
4496 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: clear the result register. */
4501 MONO_START_BB (cfg, false_bb);
4503 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4504 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4506 MONO_START_BB (cfg, is_null_bb);
4508 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal OP_CISINST opcode (used by remoting-aware
 * isinst).  See the result encoding below; proxies whose type cannot be
 * decided at JIT time yield 2 so the caller can fall back to a runtime check.
 */
4514 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4516 /* This opcode takes as input an object reference and a class, and returns:
4517 0) if the object is an instance of the class,
4518 1) if the object is not instance of the class,
4519 2) if the object is a proxy whose type cannot be determined */
4522 #ifndef DISABLE_REMOTING
4523 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4525 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4527 int obj_reg = src->dreg;
4528 int dreg = alloc_ireg (cfg);
4530 #ifndef DISABLE_REMOTING
4531 int klass_reg = alloc_preg (cfg);
4534 NEW_BBLOCK (cfg, true_bb);
4535 NEW_BBLOCK (cfg, false_bb);
4536 NEW_BBLOCK (cfg, end_bb);
4537 #ifndef DISABLE_REMOTING
4538 NEW_BBLOCK (cfg, false2_bb);
4539 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance" (result 1). */
4542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4545 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4546 #ifndef DISABLE_REMOTING
4547 NEW_BBLOCK (cfg, interface_fail_bb);
4550 tmp_reg = alloc_preg (cfg);
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4552 #ifndef DISABLE_REMOTING
/* Interface check failed: may still be a transparent proxy with custom
 * type info, in which case the answer is "cannot determine" (2). */
4553 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4554 MONO_START_BB (cfg, interface_fail_bb);
4555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4557 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4559 tmp_reg = alloc_preg (cfg);
4560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4564 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface target class. */
4567 #ifndef DISABLE_REMOTING
4568 tmp_reg = alloc_preg (cfg);
4569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* For a transparent proxy, test against the remote class's proxy_class;
 * without custom type info the proxy's type cannot be decided here. */
4572 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4573 tmp_reg = alloc_preg (cfg);
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4577 tmp_reg = alloc_preg (cfg);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4582 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4585 MONO_START_BB (cfg, no_proxy_bb);
4587 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4589 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Converge: load the result code (0/1/2) into dreg. */
4593 MONO_START_BB (cfg, false_bb);
4595 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4598 #ifndef DISABLE_REMOTING
4599 MONO_START_BB (cfg, false2_bb);
4601 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4605 MONO_START_BB (cfg, true_bb);
4607 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4609 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction for the caller. */
4612 MONO_INST_NEW (cfg, ins, OP_ICONST);
4614 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for a castclass variant aware of transparent proxies (see the
 *   0/1/throw contract in the comment below). On success the result register
 *   holds 0; for a proxy whose type cannot be determined it holds 1; any
 *   other mismatch raises InvalidCastException.
 */
4620 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4622 /* This opcode takes as input an object reference and a class, and returns:
4623 0) if the object is an instance of the class,
4624 1) if the object is a proxy whose type cannot be determined
4625 an InvalidCastException exception is thrown otherwhise*/
4628 #ifndef DISABLE_REMOTING
4629 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4631 MonoBasicBlock *ok_result_bb;
4633 int obj_reg = src->dreg;
4634 int dreg = alloc_ireg (cfg);
4635 int tmp_reg = alloc_preg (cfg);
4637 #ifndef DISABLE_REMOTING
4638 int klass_reg = alloc_preg (cfg);
4639 NEW_BBLOCK (cfg, end_bb);
4642 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully. */
4644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4645 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a descriptive exception. */
4647 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
/* Interface case: interface-map check first, then the proxy fallback. */
4649 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4650 #ifndef DISABLE_REMOTING
4651 NEW_BBLOCK (cfg, interface_fail_bb);
4653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4654 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4655 MONO_START_BB (cfg, interface_fail_bb);
4656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy either -> the cast fails outright. */
4658 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy with no custom_type_info: cannot verify -> InvalidCastException. */
4660 tmp_reg = alloc_preg (cfg);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4663 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Otherwise report 1: proxy whose type cannot be determined. */
4665 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4669 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case. */
4673 #ifndef DISABLE_REMOTING
4674 NEW_BBLOCK (cfg, no_proxy_bb);
4676 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4678 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote class's proxy_class. */
4680 tmp_reg = alloc_preg (cfg);
4681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4684 tmp_reg = alloc_preg (cfg);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4689 NEW_BBLOCK (cfg, fail_1_bb);
4691 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy cast could not be decided -> result 1. */
4693 MONO_START_BB (cfg, fail_1_bb);
4695 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a normal castclass check (throws on mismatch). */
4698 MONO_START_BB (cfg, no_proxy_bb);
4700 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4702 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* 0 = cast succeeded. */
4706 MONO_START_BB (cfg, ok_result_bb);
4708 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4710 #ifndef DISABLE_REMOTING
4711 MONO_START_BB (cfg, end_bb);
/* Surface the result register as an OP_ICONST-shaped ins typed STACK_I4. */
4715 MONO_INST_NEW (cfg, ins, OP_ICONST);
4717 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *   Emit an inlined Enum.HasFlag: load the enum value from ENUM_THIS, AND it
 *   with ENUM_FLAG, and compare the masked value back against the flag —
 *   i.e. (this & flag) == flag. is_i4 selects between the 32-bit (OP_I*) and
 *   64-bit (OP_L*) opcode variants based on the underlying enum type.
 */
4722 static G_GNUC_UNUSED MonoInst*
4723 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4725 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4726 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4727 gboolean is_i4 = TRUE;
4729 switch (enum_type->type) {
4732 #if SIZEOF_REGISTER == 8
4741 MonoInst *load, *and, *cmp, *ceq;
4742 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4743 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4744 int dest_reg = alloc_ireg (cfg);
/* (this & flag) == flag using I- or L- variants of AND/COMPARE/CEQ. */
4746 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4747 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4748 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4749 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* The comparison result is a plain int on the eval stack. */
4751 ceq->type = STACK_I4;
/* NOTE(review): the fresh opcodes are decomposed immediately — presumably
 * because this helper can run after the main decompose pass; confirm. */
4754 load = mono_decompose_opcode (cfg, load, NULL);
4755 and = mono_decompose_opcode (cfg, and, NULL);
4756 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4757 ceq = mono_decompose_opcode (cfg, ceq, NULL);
4765 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 *   fill in its target, method, method_code and trampoline-related fields.
 *   VIRTUAL selects the virtual-invoke trampoline path.
 */
4767 static G_GNUC_UNUSED MonoInst*
4768 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4772 gpointer trampoline;
4773 MonoInst *obj, *method_ins, *tramp_ins;
4777 // FIXME reenable optimisation for virtual case
4782 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4785 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate instance itself. */
4789 obj = handle_alloc (cfg, klass, FALSE, 0);
4793 /* Inline the contents of mono_delegate_ctor */
4795 /* Set target field */
4796 /* Optimize away setting of NULL target */
4797 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into a heap object needs a write barrier. */
4799 if (cfg->gen_write_barriers) {
4800 dreg = alloc_preg (cfg);
4801 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4802 emit_write_barrier (cfg, ptr, target);
4806 /* Set method field */
4807 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4811 * To avoid looking up the compiled code belonging to the target method
4812 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4813 * store it, and we fill it after the method has been compiled.
4815 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4816 MonoInst *code_slot_ins;
4819 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and populate the per-domain method -> code-slot hash. */
4821 domain = mono_domain_get ();
4822 mono_domain_lock (domain);
4823 if (!domain_jit_info (domain)->method_code_hash)
4824 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4825 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4827 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4828 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4830 mono_domain_unlock (domain);
/* AOT cannot embed the slot address directly; use a patch instead. */
4832 if (cfg->compile_aot)
4833 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4835 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: a patch for AOT, a direct pointer otherwise. */
4840 if (cfg->compile_aot) {
4841 MonoDelegateClassMethodPair *del_tramp;
4843 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4844 del_tramp->klass = klass;
4845 del_tramp->method = context_used ? NULL : method;
4846 del_tramp->virtual = virtual;
4847 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4850 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4852 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4853 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4856 /* Set invoke_impl field */
4858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: copy invoke_impl/method_ptr out of the tramp info. */
4860 dreg = alloc_preg (cfg);
4861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4864 dreg = alloc_preg (cfg);
4865 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4869 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a native call to the rank-specific mono_array_new_va icall wrapper
 *   to allocate a multi-dimensional array; SP holds the stack arguments.
 *   The vararg convention forces the method out of the LLVM backend.
 */
4875 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4877 MonoJitICallInfo *info;
4879 /* Need to register the icall so it gets an icall wrapper */
4880 info = mono_get_array_new_va_icall (rank);
4882 cfg->flags |= MONO_CFG_HAS_VARARGS;
4884 /* mono_array_new_va () needs a vararg calling convention */
4885 cfg->disable_llvm = TRUE;
4887 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4888 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4892 * handle_constrained_gsharedvt_call:
4894 * Handle constrained calls where the receiver is a gsharedvt type.
4895 * Return the instruction representing the call. Set the cfg exception on failure.
4898 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_call,
4899 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4901 MonoInst *ins = NULL;
4902 MonoBasicBlock *bblock = *ref_bblock;
4903 gboolean emit_widen = *ref_emit_widen;
4906 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4907 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4908 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a restricted set of target methods / signatures is supported; others
 * fall through to GSHAREDVT_FAILURE below. */
4910 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4911 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4912 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4913 MonoInst *args [16];
4916 * This case handles calls to
4917 * - object:ToString()/Equals()/GetHashCode(),
4918 * - System.IComparable<T>:CompareTo()
4919 * - System.IEquatable<T>:Equals ()
4920 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1]: the target method, fetched via rgctx when generic context is used. */
4924 if (mono_method_check_context_used (cmethod))
4925 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4927 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4928 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
4930 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4931 if (fsig->hasthis && fsig->param_count) {
4932 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4933 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4934 ins->dreg = alloc_preg (cfg);
4935 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4936 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: pass its box-class so the icall can box it. */
4939 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4942 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4944 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4945 addr_reg = ins->dreg;
4946 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4948 EMIT_NEW_ICONST (cfg, args [3], 0);
4949 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments beyond 'this': empty deref-arg and args-array slots. */
4952 EMIT_NEW_ICONST (cfg, args [3], 0);
4953 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall does the actual ref-vs-vtype dispatch at runtime. */
4955 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it when needed. */
4958 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4959 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4960 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Manual unbox: read the value from past the MonoObject header. */
4964 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4965 MONO_ADD_INS (cfg->cbb, add);
4967 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4968 MONO_ADD_INS (cfg->cbb, ins);
4969 /* ins represents the call result */
4972 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Propagate the (possibly updated) out-parameters back to the caller. */
4975 *ref_emit_widen = emit_widen;
4976 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *   If the method uses a GOT variable that has not been set up yet, prepend
 *   an OP_LOAD_GOTADDR to the entry basic block and add a dummy use in the
 *   exit block so the variable stays live for the whole method.
 */
4985 mono_emit_load_got_addr (MonoCompile *cfg)
4987 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a got_var, or if it was already materialized. */
4989 if (!cfg->got_var || cfg->got_var_allocated)
4992 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4993 getaddr->cil_code = cfg->header->code;
4994 getaddr->dreg = cfg->got_var->dreg;
4996 /* Add it to the start of the first bblock */
4997 if (cfg->bb_entry->code) {
4998 getaddr->next = cfg->bb_entry->code;
4999 cfg->bb_entry->code = getaddr;
5002 MONO_ADD_INS (cfg->bb_entry, getaddr);
5004 cfg->got_var_allocated = TRUE;
5007 * Add a dummy use to keep the got_var alive, since real uses might
5008 * only be generated by the back ends.
5009 * Add it to end_bblock, so the variable's lifetime covers the whole
5011 * It would be better to make the usage of the got var explicit in all
5012 * cases when the backend needs it (i.e. calls, throw etc.), so this
5013 * wouldn't be needed.
5015 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5016 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via MONO_INLINELIMIT. */
5019 static int inline_limit;
5020 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Rejects methods that are too deep, too large, explicitly NOINLINING,
 *   synchronized, marshal-by-ref, need cctor runs we cannot guarantee,
 *   carry declarative security, or use R4 under soft-float.
 */
5023 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5025 MonoMethodHeaderSummary header;
5027 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5028 MonoMethodSignature *sig = mono_method_signature (method);
/* Hard blockers: inlining disabled, generic sharing, or too-deep nesting. */
5032 if (cfg->disable_inline)
5034 if (cfg->generic_sharing_context)
5037 if (cfg->inline_depth > 10)
5040 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature' is not declared in the visible code (only 'sig'
 * under SOFT_FLOAT); this presumably only compiles where
 * MONO_ARCH_HAVE_LMF_OPS is defined — verify. */
5041 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5042 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5043 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5048 if (!mono_method_get_header_summary (method, &header))
5051 /*runtime, icall and pinvoke are checked by summary call*/
5052 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5053 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5054 (mono_class_is_marshalbyref (method->klass)) ||
5058 /* also consider num_locals? */
5059 /* Do the size check early to avoid creating vtables */
5060 if (!inline_limit_inited) {
5061 if (g_getenv ("MONO_INLINELIMIT"))
5062 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
5064 inline_limit = INLINE_LENGTH_LIMIT;
5065 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5067 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5071 * if we can initialize the class of the method right away, we do,
5072 * otherwise we don't allow inlining if the class needs initialization,
5073 * since it would mean inserting a call to mono_runtime_class_init()
5074 * inside the inlined code
5076 if (!(cfg->opt & MONO_OPT_SHARED)) {
5077 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5078 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5079 vtable = mono_class_vtable (cfg->domain, method->klass);
5082 if (!cfg->compile_aot)
5083 mono_runtime_class_init (vtable);
5084 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5085 if (cfg->run_cctors && method->klass->has_cctor) {
5086 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5087 if (!method->klass->runtime_info)
5088 /* No vtable created yet */
5090 vtable = mono_class_vtable (cfg->domain, method->klass);
5093 /* This makes so that inline cannot trigger */
5094 /* .cctors: too many apps depend on them */
5095 /* running with a specific order... */
5096 if (! vtable->initialized)
5098 mono_runtime_class_init (vtable);
5100 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5101 if (!method->klass->runtime_info)
5102 /* No vtable created yet */
5104 vtable = mono_class_vtable (cfg->domain, method->klass);
5107 if (!vtable->initialized)
5112 * If we're compiling for shared code
5113 * the cctor will need to be run at aot method load time, for example,
5114 * or at the end of the compilation of the inlining method.
5116 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5121 * CAS - do not inline methods with declarative security
5122 * Note: this has to be before any possible return TRUE;
5124 if (mono_security_method_has_declsec (method))
5127 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: reject methods touching R4 in return or parameters. */
5128 if (mono_arch_is_soft_float ()) {
5130 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5132 for (i = 0; i < sig->param_count; ++i)
5133 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compile blacklist. */
5138 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static field access in METHOD requires emitting a call
 *   to run KLASS's class constructor first. Already-initialized vtables
 *   (JIT only, not AOT) and accesses from within the class itself are exempt.
 */
5145 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5147 if (!cfg->compile_aot) {
5149 if (vtable->initialized)
/* beforefieldinit classes: only the class's own method body is exempt. */
5153 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5154 if (cfg->method == method)
5158 if (!mono_class_needs_cctor_run (klass, method))
5161 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5162 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of single-dimensional
 *   array ARR, optionally with a bounds check (BCHECK). Result is a
 *   STACK_MP instruction whose klass is the element class.
 */
5169 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5173 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt element types have a runtime-determined size (handled below). */
5176 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5179 mono_class_init (klass);
5180 size = mono_class_array_element_size (klass);
5183 mult_reg = alloc_preg (cfg);
5184 array_reg = arr->dreg;
5185 index_reg = index->dreg;
5187 #if SIZEOF_REGISTER == 8
5188 /* The array reg is 64 bits but the index reg is only 32 */
5189 if (COMPILE_LLVM (cfg)) {
5191 index2_reg = index_reg;
5193 index2_reg = alloc_preg (cfg);
5194 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5197 if (index->type == STACK_I8) {
5198 index2_reg = alloc_preg (cfg);
5199 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5201 index2_reg = index_reg;
5206 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scale into a single LEA when possible. */
5208 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5209 if (size == 1 || size == 2 || size == 4 || size == 8) {
5210 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5212 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5213 ins->klass = mono_class_get_element_class (klass);
5214 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
5220 add_reg = alloc_ireg_mp (cfg);
5223 MonoInst *rgctx_ins;
/* gsharedvt: fetch the element size from the rgctx at runtime. */
5226 g_assert (cfg->generic_sharing_context);
5227 context_used = mini_class_check_context_used (cfg, klass);
5228 g_assert (context_used);
5229 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5230 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5234 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5235 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5236 ins->klass = mono_class_get_element_class (klass);
5237 ins->type = STACK_MP;
5238 MONO_ADD_INS (cfg->cbb, ins);
5243 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 *   per-dimension lower-bound and length checks against the bounds array.
 *   Only compiled where the arch has real mul/div (uses OP_PMUL).
 */
5245 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5247 int bounds_reg = alloc_preg (cfg);
5248 int add_reg = alloc_ireg_mp (cfg);
5249 int mult_reg = alloc_preg (cfg);
5250 int mult2_reg = alloc_preg (cfg);
5251 int low1_reg = alloc_preg (cfg);
5252 int low2_reg = alloc_preg (cfg);
5253 int high1_reg = alloc_preg (cfg);
5254 int high2_reg = alloc_preg (cfg);
5255 int realidx1_reg = alloc_preg (cfg);
5256 int realidx2_reg = alloc_preg (cfg);
5257 int sum_reg = alloc_preg (cfg);
5258 int index1, index2, tmpreg;
5262 mono_class_init (klass);
5263 size = mono_class_array_element_size (klass);
5265 index1 = index_ins1->dreg;
5266 index2 = index_ins2->dreg;
5268 #if SIZEOF_REGISTER == 8
5269 /* The array reg is 64 bits but the index reg is only 32 */
5270 if (COMPILE_LLVM (cfg)) {
5273 tmpreg = alloc_preg (cfg);
5274 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5276 tmpreg = alloc_preg (cfg);
5277 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5281 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5285 /* range checking */
5286 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5287 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; require realidx < length. */
5289 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5290 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5291 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5293 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5294 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5295 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry, same check. */
5297 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5298 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5299 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5300 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5301 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5302 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5303 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + vector offset. */
5305 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5306 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5308 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5309 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5311 ins->type = STACK_MP;
5313 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Emit the address-of-element computation for an array Address/Get/Set
 *   accessor CMETHOD: inline fast paths for rank 1 (and rank 2 when the
 *   arch supports real mul), otherwise a call to the generic
 *   array-address marshal wrapper.
 */
5320 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5324 MonoMethod *addr_method;
5326 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the trailing value argument is not an index. */
5328 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5331 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5333 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5334 /* emit_ldelema_2 depends on OP_LMUL */
5335 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5336 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5340 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* Slow path: call the rank/element-size specific address wrapper. */
5343 element_size = mono_class_array_element_size (eclass);
5344 addr_method = mono_marshal_get_array_address (rank, element_size);
5345 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5350 static MonoBreakPolicy
5351 always_insert_breakpoint (MonoMethod *method)
5353 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced by embedders via mono_set_break_policy. */
5356 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5359 * mono_set_break_policy:
5360 * policy_callback: the new callback function
5362 * Allow embedders to decide wherther to actually obey breakpoint instructions
5363 * (both break IL instructions and Debugger.Break () method calls), for example
5364 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5365 * untrusted or semi-trusted code.
5367 * @policy_callback will be called every time a break point instruction needs to
5368 * be inserted with the method argument being the method that calls Debugger.Break()
5369 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5370 * if it wants the breakpoint to not be effective in the given method.
5371 * #MONO_BREAK_POLICY_ALWAYS is the default.
5374 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
5376 if (policy_callback)
5377 break_policy_func = policy_callback;
5379 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Consult the installed break policy for METHOD.
 *   NOTE(review): the name is a typo for "breakpoint", but it is presumably
 *   referenced elsewhere in this file — renaming would need a file-wide sweep.
 */
5383 should_insert_brekpoint (MonoMethod *method) {
5384 switch (break_policy_func (method)) {
5385 case MONO_BREAK_POLICY_ALWAYS:
5387 case MONO_BREAK_POLICY_NEVER:
5389 case MONO_BREAK_POLICY_ON_DBG:
5390 g_warning ("mdb no longer supported");
/* Unknown value from an embedder callback: warn rather than crash. */
5393 g_warning ("Incorrect value returned from break policy callback");
5398 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the Get/SetGenericValueImpl icalls as a direct load/store through
 *   the element address. IS_SET chooses store-into-array vs load-from-array.
 */
5400 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5402 MonoInst *addr, *store, *load;
5403 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5405 /* the bounds check is already done by the callers */
5406 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the element; add a write barrier for references. */
5408 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5409 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5410 if (mini_type_is_reference (cfg, fsig->params [2]))
5411 emit_write_barrier (cfg, addr, load);
/* Get: copy the element into *args[2]. */
5413 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5414 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True when KLASS is (or instantiates to) a reference type in this compile. */
5421 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5423 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit a stelem-style store of SP[2] into SP[0][SP[1]]. Reference-typed
 *   stores with a non-null value go through the virtual stelemref helper
 *   (which performs the array covariance check); value types are stored
 *   directly, with a constant-index fast path.
 */
5427 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5429 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5430 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5431 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5432 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5433 MonoInst *iargs [3];
/* The helper is dispatched virtually, so its vtable slot must exist. */
5436 mono_class_setup_vtable (obj_array);
5437 g_assert (helper->slot);
5439 if (sp [0]->type != STACK_OBJ)
5441 if (sp [2]->type != STACK_OBJ)
5448 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5452 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5455 // FIXME-VT: OP_ICONST optimization
5456 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5457 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5458 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: compute the element offset at compile time. */
5459 } else if (sp [1]->opcode == OP_ICONST) {
5460 int array_reg = sp [0]->dreg;
5461 int index_reg = sp [1]->dreg;
5462 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5465 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5466 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General path: compute the element address, then store through it. */
5468 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5469 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5470 if (generic_class_is_reference_type (cfg, klass))
5471 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline the Array.Unsafe{Load,Store} intrinsics without safety checks:
 *   stores reuse emit_array_store (safety_checks=FALSE), loads go through
 *   the element address with no bounds check.
 */
5478 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* The element type comes from the value parameter (set) or the return (load). */
5483 eklass = mono_class_from_mono_type (fsig->params [2]);
5485 eklass = mono_class_from_mono_type (fsig->ret);
5488 return emit_array_store (cfg, eklass, args, FALSE);
5490 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5491 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5497 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5501 //Only allow for valuetypes
5502 if (!param_klass->valuetype || !return_klass->valuetype)
5506 if (param_klass->has_references || return_klass->has_references)
5509 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5510 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5511 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5514 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5515 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5518 //And have the same size
5519 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Inline the Array.UnsafeMov intrinsic when the parameter and return
 *   types (or their element types, for rank-1 arrays) are bitwise
 *   compatible per is_unsafe_mov_compatible ().
 */
5525 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5527 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5528 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5530 //Valuetypes that are semantically equivalent
5531 if (is_unsafe_mov_compatible (param_klass, return_klass))
5534 //Arrays of valuetypes that are semantically equivalent
5535 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructor calls: tries SIMD intrinsics
 *   first (when enabled), then the native-types intrinsics.
 */
5542 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5544 #ifdef MONO_ARCH_SIMD_INTRINSICS
5545 MonoInst *ins = NULL;
5547 if (cfg->opt & MONO_OPT_SIMD) {
5548 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5554 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER of the given KIND (a MONO_MEMORY_BARRIER_*
 *   value, stored in backend.memory_barrier_kind) to the current bblock.
 */
5558 emit_memory_barrier (MonoCompile *cfg, int kind)
5560 MonoInst *ins = NULL;
5561 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5562 MONO_ADD_INS (cfg->cbb, ins);
5563 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs(double) as unary FP ops, and integer
 * Min/Max (signed and unsigned, 32 and 64 bit) when CMOV optimization
 * is enabled. Returns the result instruction or NULL.
 * NOTE: the opcode assignments for several branches are elided in this
 * extract (e.g. the lines setting 'opcode' after each strcmp match).
 */
5569 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5571 MonoInst *ins = NULL;
5574 /* The LLVM backend supports these intrinsics */
5575 if (cmethod->klass == mono_defaults.math_class) {
5576 if (strcmp (cmethod->name, "Sin") == 0) {
5578 } else if (strcmp (cmethod->name, "Cos") == 0) {
5580 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5582 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5586 if (opcode && fsig->param_count == 1) {
5587 MONO_INST_NEW (cfg, ins, opcode);
5588 ins->type = STACK_R8;
5589 ins->dreg = mono_alloc_freg (cfg);
5590 ins->sreg1 = args [0]->dreg;
5591 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max need conditional moves, so they are gated on MONO_OPT_CMOV */
5595 if (cfg->opt & MONO_OPT_CMOV) {
5596 if (strcmp (cmethod->name, "Min") == 0) {
5597 if (fsig->params [0]->type == MONO_TYPE_I4)
5599 if (fsig->params [0]->type == MONO_TYPE_U4)
5600 opcode = OP_IMIN_UN;
5601 else if (fsig->params [0]->type == MONO_TYPE_I8)
5603 else if (fsig->params [0]->type == MONO_TYPE_U8)
5604 opcode = OP_LMIN_UN;
5605 } else if (strcmp (cmethod->name, "Max") == 0) {
5606 if (fsig->params [0]->type == MONO_TYPE_I4)
5608 if (fsig->params [0]->type == MONO_TYPE_U4)
5609 opcode = OP_IMAX_UN;
5610 else if (fsig->params [0]->type == MONO_TYPE_I8)
5612 else if (fsig->params [0]->type == MONO_TYPE_U8)
5613 opcode = OP_LMAX_UN;
5617 if (opcode && fsig->param_count == 2) {
5618 MONO_INST_NEW (cfg, ins, opcode);
5619 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5620 ins->dreg = mono_alloc_ireg (cfg);
5621 ins->sreg1 = args [0]->dreg;
5622 ins->sreg2 = args [1]->dreg;
5623 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe even under generic sharing: the internal
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers. Returns the emitted
 * instruction or NULL when no intrinsic applies.
 */
5631 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5633 if (cmethod->klass == mono_defaults.array_class) {
5634 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5635 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5636 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5637 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5638 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5639 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline JIT IR (an intrinsic).
 * Handles String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Volatile, Debugger, Environment, Math and ObjC Selector
 * methods, then falls back to SIMD / native-type / LLVM / arch-specific
 * intrinsics. Returns the result instruction, or (via the fall-through)
 * whatever the arch-specific hook returns.
 * NOTE: many lines (#else arms, returns, braces) are elided in this extract.
 */
5646 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5648 MonoInst *ins = NULL;
5650 static MonoClass *runtime_helpers_class = NULL;
5651 if (! runtime_helpers_class)
5652 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5653 "System.Runtime.CompilerServices", "RuntimeHelpers");
5655 if (cmethod->klass == mono_defaults.string_class) {
5656 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5657 int dreg = alloc_ireg (cfg);
5658 int index_reg = alloc_preg (cfg);
5659 int mult_reg = alloc_preg (cfg);
5660 int add_reg = alloc_preg (cfg);
5662 #if SIZEOF_REGISTER == 8
5663 /* The array reg is 64 bits but the index reg is only 32 */
5664 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5666 index_reg = args [1]->dreg;
5668 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5670 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5671 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5672 add_reg = ins->dreg;
5673 /* Avoid a warning */
5675 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: addr = str + index*2 + offsetof(chars), then 16-bit load */
5678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5679 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5680 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5681 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5683 type_from_op (cfg, ins, NULL, NULL);
5685 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5686 int dreg = alloc_ireg (cfg);
5687 /* Decompose later to allow more optimizations */
5688 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5689 ins->type = STACK_I4;
5690 ins->flags |= MONO_INST_FAULT;
5691 cfg->cbb->has_array_access = TRUE;
5692 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5695 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5696 int mult_reg = alloc_preg (cfg);
5697 int add_reg = alloc_preg (cfg);
5699 /* The corlib functions check for oob already. */
5700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5701 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5702 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5703 return cfg->cbb->last_ins;
5706 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: two loads, obj->vtable->type, with a fault check on obj */
5708 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5709 int dreg = alloc_ireg_ref (cfg);
5710 int vt_reg = alloc_preg (cfg);
5711 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5712 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5713 type_from_op (cfg, ins, NULL, NULL);
5716 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash; only valid while the GC does not move objects
 * (guarded by !mono_gc_is_moving ()). 2654435761 is presumably the Knuth
 * multiplicative-hash constant -- matches upstream; verify. */
5717 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5718 int dreg = alloc_ireg (cfg);
5719 int t1 = alloc_ireg (cfg);
5721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5722 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5723 ins->type = STACK_I4;
5727 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5728 MONO_INST_NEW (cfg, ins, OP_NOP);
5729 MONO_ADD_INS (cfg->cbb, ins);
5733 } else if (cmethod->klass == mono_defaults.array_class) {
5734 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5735 return emit_array_generic_access (cfg, fsig, args, FALSE);
5736 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5737 return emit_array_generic_access (cfg, fsig, args, TRUE);
5739 #ifndef MONO_BIG_ARRAYS
5741 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5744 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5745 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5746 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5747 int dreg = alloc_ireg (cfg);
5748 int bounds_reg = alloc_ireg_mp (cfg);
5749 MonoBasicBlock *end_bb, *szarray_bb;
5750 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5752 NEW_BBLOCK (cfg, end_bb);
5753 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on whether the array has a bounds descriptor (multi-dim) or not */
5755 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5756 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5757 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5759 /* Non-szarray case */
5761 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5762 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5764 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5765 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5767 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is constant 0 */
5770 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5771 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5773 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5774 MONO_START_BB (cfg, end_bb);
5776 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5777 ins->type = STACK_I4;
/* Cheap early-out: remaining Array intrinsics are all getters ("get_...") */
5783 if (cmethod->name [0] != 'g')
5786 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5787 int dreg = alloc_ireg (cfg);
5788 int vtable_reg = alloc_preg (cfg);
5789 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5790 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5791 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5792 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5793 type_from_op (cfg, ins, NULL, NULL);
5796 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5797 int dreg = alloc_ireg (cfg);
5799 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5800 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5801 type_from_op (cfg, ins, NULL, NULL);
5806 } else if (cmethod->klass == runtime_helpers_class) {
5808 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5809 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5813 } else if (cmethod->klass == mono_defaults.thread_class) {
5814 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5815 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5816 MONO_ADD_INS (cfg->cbb, ins);
5818 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5819 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load followed by an acquire barrier (see the
 * emit_memory_barrier (..., MONO_MEMORY_BARRIER_ACQ) call below) */
5820 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5822 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5824 if (fsig->params [0]->type == MONO_TYPE_I1)
5825 opcode = OP_LOADI1_MEMBASE;
5826 else if (fsig->params [0]->type == MONO_TYPE_U1)
5827 opcode = OP_LOADU1_MEMBASE;
5828 else if (fsig->params [0]->type == MONO_TYPE_I2)
5829 opcode = OP_LOADI2_MEMBASE;
5830 else if (fsig->params [0]->type == MONO_TYPE_U2)
5831 opcode = OP_LOADU2_MEMBASE;
5832 else if (fsig->params [0]->type == MONO_TYPE_I4)
5833 opcode = OP_LOADI4_MEMBASE;
5834 else if (fsig->params [0]->type == MONO_TYPE_U4)
5835 opcode = OP_LOADU4_MEMBASE;
5836 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5837 opcode = OP_LOADI8_MEMBASE;
5838 else if (fsig->params [0]->type == MONO_TYPE_R4)
5839 opcode = OP_LOADR4_MEMBASE;
5840 else if (fsig->params [0]->type == MONO_TYPE_R8)
5841 opcode = OP_LOADR8_MEMBASE;
5842 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5843 opcode = OP_LOAD_MEMBASE;
5846 MONO_INST_NEW (cfg, ins, opcode);
5847 ins->inst_basereg = args [0]->dreg;
5848 ins->inst_offset = 0;
5849 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg class and stack type from the parameter's CIL type */
5851 switch (fsig->params [0]->type) {
5858 ins->dreg = mono_alloc_ireg (cfg);
5859 ins->type = STACK_I4;
5863 ins->dreg = mono_alloc_lreg (cfg);
5864 ins->type = STACK_I8;
5868 ins->dreg = mono_alloc_ireg (cfg);
5869 #if SIZEOF_REGISTER == 8
5870 ins->type = STACK_I8;
5872 ins->type = STACK_I4;
5877 ins->dreg = mono_alloc_freg (cfg);
5878 ins->type = STACK_R8;
5881 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5882 ins->dreg = mono_alloc_ireg_ref (cfg);
5883 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition into register pairs on 32-bit */
5887 if (opcode == OP_LOADI8_MEMBASE)
5888 ins = mono_decompose_opcode (cfg, ins, NULL);
5890 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier first, then a plain store */
5894 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5896 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5898 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5899 opcode = OP_STOREI1_MEMBASE_REG;
5900 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5901 opcode = OP_STOREI2_MEMBASE_REG;
5902 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5903 opcode = OP_STOREI4_MEMBASE_REG;
5904 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5905 opcode = OP_STOREI8_MEMBASE_REG;
5906 else if (fsig->params [0]->type == MONO_TYPE_R4)
5907 opcode = OP_STORER4_MEMBASE_REG;
5908 else if (fsig->params [0]->type == MONO_TYPE_R8)
5909 opcode = OP_STORER8_MEMBASE_REG;
5910 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5911 opcode = OP_STORE_MEMBASE_REG;
5914 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5916 MONO_INST_NEW (cfg, ins, opcode);
5917 ins->sreg1 = args [1]->dreg;
5918 ins->inst_destbasereg = args [0]->dreg;
5919 ins->inst_offset = 0;
5920 MONO_ADD_INS (cfg->cbb, ins);
5922 if (opcode == OP_STOREI8_MEMBASE_REG)
5923 ins = mono_decompose_opcode (cfg, ins, NULL);
/* Monitor.Enter/Exit go through special trampolines; on non-LLVM the object
 * is passed in a fixed register (MONO_ARCH_MONITOR_OBJECT_REG) */
5928 } else if (cmethod->klass == mono_defaults.monitor_class) {
5929 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5930 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5933 if (COMPILE_LLVM (cfg)) {
5935 * Pass the argument normally, the LLVM backend will handle the
5936 * calling convention problems.
5938 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5940 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5941 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5942 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5943 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5946 return (MonoInst*)call;
5947 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5948 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5951 if (COMPILE_LLVM (cfg)) {
5953 * Pass the argument normally, the LLVM backend will handle the
5954 * calling convention problems.
5956 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5958 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5959 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5960 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5961 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5964 return (MonoInst*)call;
5966 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5969 if (COMPILE_LLVM (cfg)) {
5970 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5972 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5973 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5974 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5975 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5978 return (MonoInst*)call;
/* System.Threading.Interlocked intrinsics */
5981 } else if (cmethod->klass->image == mono_defaults.corlib &&
5982 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5983 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5986 #if SIZEOF_REGISTER == 8
5987 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5988 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5989 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5990 ins->dreg = mono_alloc_preg (cfg);
5991 ins->sreg1 = args [0]->dreg;
5992 ins->type = STACK_I8;
5993 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5994 MONO_ADD_INS (cfg->cbb, ins);
5998 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6000 /* 64 bit reads are already atomic */
6001 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6002 load_ins->dreg = mono_alloc_preg (cfg);
6003 load_ins->inst_basereg = args [0]->dreg;
6004 load_ins->inst_offset = 0;
6005 load_ins->type = STACK_I8;
6006 MONO_ADD_INS (cfg->cbb, load_ins);
6008 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement/Add map to OP_ATOMIC_ADD_* with a constant or the
 * caller-supplied addend; bail to a normal call if the arch lacks them */
6015 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6016 MonoInst *ins_iconst;
6019 if (fsig->params [0]->type == MONO_TYPE_I4) {
6020 opcode = OP_ATOMIC_ADD_I4;
6021 cfg->has_atomic_add_i4 = TRUE;
6023 #if SIZEOF_REGISTER == 8
6024 else if (fsig->params [0]->type == MONO_TYPE_I8)
6025 opcode = OP_ATOMIC_ADD_I8;
6028 if (!mono_arch_opcode_supported (opcode))
6030 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6031 ins_iconst->inst_c0 = 1;
6032 ins_iconst->dreg = mono_alloc_ireg (cfg);
6033 MONO_ADD_INS (cfg->cbb, ins_iconst);
6035 MONO_INST_NEW (cfg, ins, opcode);
6036 ins->dreg = mono_alloc_ireg (cfg);
6037 ins->inst_basereg = args [0]->dreg;
6038 ins->inst_offset = 0;
6039 ins->sreg2 = ins_iconst->dreg;
6040 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6041 MONO_ADD_INS (cfg->cbb, ins);
6043 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6044 MonoInst *ins_iconst;
6047 if (fsig->params [0]->type == MONO_TYPE_I4) {
6048 opcode = OP_ATOMIC_ADD_I4;
6049 cfg->has_atomic_add_i4 = TRUE;
6051 #if SIZEOF_REGISTER == 8
6052 else if (fsig->params [0]->type == MONO_TYPE_I8)
6053 opcode = OP_ATOMIC_ADD_I8;
6056 if (!mono_arch_opcode_supported (opcode))
6058 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6059 ins_iconst->inst_c0 = -1;
6060 ins_iconst->dreg = mono_alloc_ireg (cfg);
6061 MONO_ADD_INS (cfg->cbb, ins_iconst);
6063 MONO_INST_NEW (cfg, ins, opcode);
6064 ins->dreg = mono_alloc_ireg (cfg);
6065 ins->inst_basereg = args [0]->dreg;
6066 ins->inst_offset = 0;
6067 ins->sreg2 = ins_iconst->dreg;
6068 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6069 MONO_ADD_INS (cfg->cbb, ins);
6071 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6074 if (fsig->params [0]->type == MONO_TYPE_I4) {
6075 opcode = OP_ATOMIC_ADD_I4;
6076 cfg->has_atomic_add_i4 = TRUE;
6078 #if SIZEOF_REGISTER == 8
6079 else if (fsig->params [0]->type == MONO_TYPE_I8)
6080 opcode = OP_ATOMIC_ADD_I8;
6083 if (!mono_arch_opcode_supported (opcode))
6085 MONO_INST_NEW (cfg, ins, opcode);
6086 ins->dreg = mono_alloc_ireg (cfg);
6087 ins->inst_basereg = args [0]->dreg;
6088 ins->inst_offset = 0;
6089 ins->sreg2 = args [1]->dreg;
6090 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6091 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Exchange: float variants are routed through int registers via
 * the f2i/i2f move opcodes, since the atomic ops only exist for integers */
6094 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6095 MonoInst *f2i = NULL, *i2f;
6096 guint32 opcode, f2i_opcode, i2f_opcode;
6097 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6098 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6100 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6101 fsig->params [0]->type == MONO_TYPE_R4) {
6102 opcode = OP_ATOMIC_EXCHANGE_I4;
6103 f2i_opcode = OP_MOVE_F_TO_I4;
6104 i2f_opcode = OP_MOVE_I4_TO_F;
6105 cfg->has_atomic_exchange_i4 = TRUE;
6107 #if SIZEOF_REGISTER == 8
6109 fsig->params [0]->type == MONO_TYPE_I8 ||
6110 fsig->params [0]->type == MONO_TYPE_R8 ||
6111 fsig->params [0]->type == MONO_TYPE_I) {
6112 opcode = OP_ATOMIC_EXCHANGE_I8;
6113 f2i_opcode = OP_MOVE_F_TO_I8;
6114 i2f_opcode = OP_MOVE_I8_TO_F;
6117 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6118 opcode = OP_ATOMIC_EXCHANGE_I4;
6119 cfg->has_atomic_exchange_i4 = TRUE;
6125 if (!mono_arch_opcode_supported (opcode))
6129 /* TODO: Decompose these opcodes instead of bailing here. */
6130 if (COMPILE_SOFT_FLOAT (cfg))
6133 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6134 f2i->dreg = mono_alloc_ireg (cfg);
6135 f2i->sreg1 = args [1]->dreg;
6136 if (f2i_opcode == OP_MOVE_F_TO_I4)
6137 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6138 MONO_ADD_INS (cfg->cbb, f2i);
6141 MONO_INST_NEW (cfg, ins, opcode);
6142 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6143 ins->inst_basereg = args [0]->dreg;
6144 ins->inst_offset = 0;
6145 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6146 MONO_ADD_INS (cfg->cbb, ins);
6148 switch (fsig->params [0]->type) {
6150 ins->type = STACK_I4;
6153 ins->type = STACK_I8;
6156 #if SIZEOF_REGISTER == 8
6157 ins->type = STACK_I8;
6159 ins->type = STACK_I4;
6164 ins->type = STACK_R8;
6167 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6168 ins->type = STACK_OBJ;
6173 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6174 i2f->dreg = mono_alloc_freg (cfg);
6175 i2f->sreg1 = ins->dreg;
6176 i2f->type = STACK_R8;
6177 if (i2f_opcode == OP_MOVE_I4_TO_F)
6178 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6179 MONO_ADD_INS (cfg->cbb, i2f);
/* Storing a reference through the location needs a GC write barrier */
6184 if (cfg->gen_write_barriers && is_ref)
6185 emit_write_barrier (cfg, args [0], args [1]);
/* Interlocked.CompareExchange(ref T, T value, T comparand) via OP_ATOMIC_CAS_* */
6187 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6188 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6189 guint32 opcode, f2i_opcode, i2f_opcode;
6190 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6191 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6193 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6194 fsig->params [1]->type == MONO_TYPE_R4) {
6195 opcode = OP_ATOMIC_CAS_I4;
6196 f2i_opcode = OP_MOVE_F_TO_I4;
6197 i2f_opcode = OP_MOVE_I4_TO_F;
6198 cfg->has_atomic_cas_i4 = TRUE;
6200 #if SIZEOF_REGISTER == 8
6202 fsig->params [1]->type == MONO_TYPE_I8 ||
6203 fsig->params [1]->type == MONO_TYPE_R8 ||
6204 fsig->params [1]->type == MONO_TYPE_I) {
6205 opcode = OP_ATOMIC_CAS_I8;
6206 f2i_opcode = OP_MOVE_F_TO_I8;
6207 i2f_opcode = OP_MOVE_I8_TO_F;
6210 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6211 opcode = OP_ATOMIC_CAS_I4;
6212 cfg->has_atomic_cas_i4 = TRUE;
6218 if (!mono_arch_opcode_supported (opcode))
6222 /* TODO: Decompose these opcodes instead of bailing here. */
6223 if (COMPILE_SOFT_FLOAT (cfg))
6226 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6227 f2i_new->dreg = mono_alloc_ireg (cfg);
6228 f2i_new->sreg1 = args [1]->dreg;
6229 if (f2i_opcode == OP_MOVE_F_TO_I4)
6230 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6231 MONO_ADD_INS (cfg->cbb, f2i_new);
6233 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6234 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6235 f2i_cmp->sreg1 = args [2]->dreg;
6236 if (f2i_opcode == OP_MOVE_F_TO_I4)
6237 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6238 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6241 MONO_INST_NEW (cfg, ins, opcode);
6242 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6243 ins->sreg1 = args [0]->dreg;
6244 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6245 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6246 MONO_ADD_INS (cfg->cbb, ins);
6248 switch (fsig->params [0]->type) {
6250 ins->type = STACK_I4;
6253 ins->type = STACK_I8;
6256 #if SIZEOF_REGISTER == 8
6257 ins->type = STACK_I8;
6259 ins->type = STACK_I4;
6264 ins->type = STACK_R8;
6267 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6268 ins->type = STACK_OBJ;
6273 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6274 i2f->dreg = mono_alloc_freg (cfg);
6275 i2f->sreg1 = ins->dreg;
6276 i2f->type = STACK_R8;
6277 if (i2f_opcode == OP_MOVE_I4_TO_F)
6278 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6279 MONO_ADD_INS (cfg->cbb, i2f);
6284 if (cfg->gen_write_barriers && is_ref)
6285 emit_write_barrier (cfg, args [0], args [1]);
/* 4-arg CompareExchange(int): CAS, then compute the bool success flag and
 * store it through the out parameter args [3] */
6287 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6288 fsig->params [1]->type == MONO_TYPE_I4) {
6289 MonoInst *cmp, *ceq;
6291 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6294 /* int32 r = CAS (location, value, comparand); */
6295 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6296 ins->dreg = alloc_ireg (cfg);
6297 ins->sreg1 = args [0]->dreg;
6298 ins->sreg2 = args [1]->dreg;
6299 ins->sreg3 = args [2]->dreg;
6300 ins->type = STACK_I4;
6301 MONO_ADD_INS (cfg->cbb, ins);
6303 /* bool result = r == comparand; */
6304 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6305 cmp->sreg1 = ins->dreg;
6306 cmp->sreg2 = args [2]->dreg;
6307 cmp->type = STACK_I4;
6308 MONO_ADD_INS (cfg->cbb, cmp);
6310 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6311 ceq->dreg = alloc_ireg (cfg);
6312 ceq->type = STACK_I4;
6313 MONO_ADD_INS (cfg->cbb, ceq);
6315 /* *success = result; */
6316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6318 cfg->has_atomic_cas_i4 = TRUE;
6320 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6321 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* System.Threading.Volatile.Read/Write map to the OP_ATOMIC_LOAD_*/STORE_*
 * opcodes with acquire/release semantics respectively */
6325 } else if (cmethod->klass->image == mono_defaults.corlib &&
6326 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6327 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6330 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6332 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6333 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6335 if (fsig->params [0]->type == MONO_TYPE_I1)
6336 opcode = OP_ATOMIC_LOAD_I1;
6337 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6338 opcode = OP_ATOMIC_LOAD_U1;
6339 else if (fsig->params [0]->type == MONO_TYPE_I2)
6340 opcode = OP_ATOMIC_LOAD_I2;
6341 else if (fsig->params [0]->type == MONO_TYPE_U2)
6342 opcode = OP_ATOMIC_LOAD_U2;
6343 else if (fsig->params [0]->type == MONO_TYPE_I4)
6344 opcode = OP_ATOMIC_LOAD_I4;
6345 else if (fsig->params [0]->type == MONO_TYPE_U4)
6346 opcode = OP_ATOMIC_LOAD_U4;
6347 else if (fsig->params [0]->type == MONO_TYPE_R4)
6348 opcode = OP_ATOMIC_LOAD_R4;
6349 else if (fsig->params [0]->type == MONO_TYPE_R8)
6350 opcode = OP_ATOMIC_LOAD_R8;
6351 #if SIZEOF_REGISTER == 8
6352 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6353 opcode = OP_ATOMIC_LOAD_I8;
6354 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6355 opcode = OP_ATOMIC_LOAD_U8;
6357 else if (fsig->params [0]->type == MONO_TYPE_I)
6358 opcode = OP_ATOMIC_LOAD_I4;
6359 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6360 opcode = OP_ATOMIC_LOAD_U4;
6364 if (!mono_arch_opcode_supported (opcode))
6367 MONO_INST_NEW (cfg, ins, opcode);
6368 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6369 ins->sreg1 = args [0]->dreg;
6370 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6371 MONO_ADD_INS (cfg->cbb, ins);
6373 switch (fsig->params [0]->type) {
6374 case MONO_TYPE_BOOLEAN:
6381 ins->type = STACK_I4;
6385 ins->type = STACK_I8;
6389 #if SIZEOF_REGISTER == 8
6390 ins->type = STACK_I8;
6392 ins->type = STACK_I4;
6397 ins->type = STACK_R8;
6400 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6401 ins->type = STACK_OBJ;
6407 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6409 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6411 if (fsig->params [0]->type == MONO_TYPE_I1)
6412 opcode = OP_ATOMIC_STORE_I1;
6413 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6414 opcode = OP_ATOMIC_STORE_U1;
6415 else if (fsig->params [0]->type == MONO_TYPE_I2)
6416 opcode = OP_ATOMIC_STORE_I2;
6417 else if (fsig->params [0]->type == MONO_TYPE_U2)
6418 opcode = OP_ATOMIC_STORE_U2;
6419 else if (fsig->params [0]->type == MONO_TYPE_I4)
6420 opcode = OP_ATOMIC_STORE_I4;
6421 else if (fsig->params [0]->type == MONO_TYPE_U4)
6422 opcode = OP_ATOMIC_STORE_U4;
6423 else if (fsig->params [0]->type == MONO_TYPE_R4)
6424 opcode = OP_ATOMIC_STORE_R4;
6425 else if (fsig->params [0]->type == MONO_TYPE_R8)
6426 opcode = OP_ATOMIC_STORE_R8;
6427 #if SIZEOF_REGISTER == 8
6428 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6429 opcode = OP_ATOMIC_STORE_I8;
6430 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6431 opcode = OP_ATOMIC_STORE_U8;
6433 else if (fsig->params [0]->type == MONO_TYPE_I)
6434 opcode = OP_ATOMIC_STORE_I4;
6435 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6436 opcode = OP_ATOMIC_STORE_U4;
6440 if (!mono_arch_opcode_supported (opcode))
6443 MONO_INST_NEW (cfg, ins, opcode);
6444 ins->dreg = args [0]->dreg;
6445 ins->sreg1 = args [1]->dreg;
6446 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6447 MONO_ADD_INS (cfg->cbb, ins);
6449 if (cfg->gen_write_barriers && is_ref)
6450 emit_write_barrier (cfg, args [0], args [1]);
/* Debugger.Break: a real breakpoint only when the debugger agent wants one,
 * otherwise a NOP */
6456 } else if (cmethod->klass->image == mono_defaults.corlib &&
6457 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6458 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6459 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6460 if (should_insert_brekpoint (cfg->method)) {
6461 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6463 MONO_INST_NEW (cfg, ins, OP_NOP);
6464 MONO_ADD_INS (cfg->cbb, ins);
6468 } else if (cmethod->klass->image == mono_defaults.corlib &&
6469 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6470 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6471 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6473 EMIT_NEW_ICONST (cfg, ins, 1);
6475 EMIT_NEW_ICONST (cfg, ins, 0);
6478 } else if (cmethod->klass == mono_defaults.math_class) {
6480 * There is general branchless code for Min/Max, but it does not work for
6482 * http://everything2.com/?node_id=1051618
6484 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6485 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6486 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6487 !strcmp (cmethod->klass->name, "Selector")) {
6488 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* NOTE(review): this compares cmethod->klass->name against "GetHandle", but
 * the enclosing branch already established klass->name == "Selector", so this
 * condition can never be true; the intent is almost certainly cmethod->name
 * (the method name) -- confirm against upstream before changing. */
6489 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6490 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6493 MonoJumpInfoToken *ji;
6496 cfg->disable_llvm = TRUE;
6498 if (args [0]->opcode == OP_GOT_ENTRY) {
6499 pi = args [0]->inst_p1;
6500 g_assert (pi->opcode == OP_PATCH_INFO);
6501 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6504 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6505 ji = args [0]->inst_p0;
/* The ldstr argument is folded into the selector; kill the original inst */
6508 NULLIFY_INS (args [0]);
6511 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6512 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6513 ins->dreg = mono_alloc_ireg (cfg);
6515 ins->inst_p0 = mono_string_to_utf8 (s);
6516 MONO_ADD_INS (cfg->cbb, ins);
/* Fallbacks: SIMD, native-type, LLVM-only, then arch-specific intrinsics */
6522 #ifdef MONO_ARCH_SIMD_INTRINSICS
6523 if (cfg->opt & MONO_OPT_SIMD) {
6524 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6530 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6534 if (COMPILE_LLVM (cfg)) {
6535 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6540 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6544 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different implementation when profitable.
 * Currently only redirects String.InternalAllocateStr to the GC's managed
 * allocator (skipped when allocation profiling or MONO_OPT_SHARED is active,
 * and when cross-compiling). Returns the replacement call or (not visible in
 * this extract) NULL to emit the call unchanged.
 */
6547 inline static MonoInst*
6548 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6549 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6551 if (method->klass == mono_defaults.string_class) {
6552 /* managed string allocation support */
6553 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6554 MonoInst *iargs [2];
6555 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6556 MonoMethod *managed_alloc = NULL;
6558 g_assert (vtable); /* Should not fail since it is System.String */
6559 #ifndef MONO_CROSS_COMPILE
6560 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* managed allocator takes (vtable, length) */
6564 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6565 iargs [1] = args [0];
6566 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for each argument (including an
 * implicit 'this') and emit stores of the stack values SP into them, so the
 * inlined body can address them as cfg->args [i].
 */
6573 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6575 MonoInst *store, *temp;
6578 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6579 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6582 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6583 * would be different than the MonoInst's used to represent arguments, and
6584 * the ldelema implementation can't deal with that.
6585 * Solution: When ldelema is used on an inline argument, create a var for
6586 * it, emit ldelema on that var, and emit the saving code below in
6587 * inline_method () if needed.
6589 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6590 cfg->args [i] = temp;
6591 /* This uses cfg->args [i] which is set by the preceding line */
6592 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6593 store->cil_code = sp [0]->cil_code;
6598 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6599 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6601 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env variable.
 */
6603 check_inline_called_method_name_limit (MonoMethod *called_method)
6606 static const char *limit = NULL;
/* Read the env variable once and cache it in a function-level static */
6608 if (limit == NULL) {
6609 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6611 if (limit_string != NULL)
6612 limit = limit_string;
6617 if (limit [0] != '\0') {
6618 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison against the configured limit */
6620 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6621 g_free (called_method_name);
6623 //return (strncmp_result <= 0);
6624 return (strncmp_result == 0);
6631 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining inside callers whose full name starts
 * with the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env variable.
 */
6633 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6636 static const char *limit = NULL;
/* Read the env variable once and cache it in a function-level static */
6638 if (limit == NULL) {
6639 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6640 if (limit_string != NULL) {
6641 limit = limit_string;
6647 if (limit [0] != '\0') {
6648 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit */
6650 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6651 g_free (caller_method_name);
6653 //return (strncmp_result <= 0);
6654 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes the vreg DREG to the zero value appropriate for
 * the (underlying) type RTYPE: NULL for references/pointers, 0 for integers,
 * 0.0 for floats, and VZERO for value types.
 */
6662 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Statics so OP_R4CONST/OP_R8CONST can point at stable storage */
6664 static double r8_0 = 0.0;
6665 static float r4_0 = 0.0;
6669 rtype = mini_get_underlying_type (cfg, rtype);
6673 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6674 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6675 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6676 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6677 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6678 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* Native r4 mode: keep the value in single precision */
6679 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6680 ins->type = STACK_R4;
6681 ins->inst_p0 = (void*)&r4_0;
6683 MONO_ADD_INS (cfg->cbb, ins);
6684 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6685 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6686 ins->type = STACK_R8;
6687 ins->inst_p0 = (void*)&r8_0;
6689 MONO_ADD_INS (cfg->cbb, ins);
6690 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6691 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6692 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6693 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
/* Generic type variables constrained to value types also get VZERO */
6694 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6696 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* instructions which keep the
 * IR valid (the vreg has a definition) without generating real code. Falls
 * back to emit_init_rvar () for types with no dummy opcode.
 */
6701 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6705 rtype = mini_get_underlying_type (cfg, rtype);
6709 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6710 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6711 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6712 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6713 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6714 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6715 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6716 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6717 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6718 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6719 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6720 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6721 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6722 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero init instead */
6724 emit_init_rvar (cfg, dreg, rtype);
6728 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6730 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6732 MonoInst *var = cfg->locals [local];
6733 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Soft-float: init into a fresh vreg, then store that into the local */
6735 int reg = alloc_dreg (cfg, var->type);
6736 emit_init_rvar (cfg, reg, type);
6737 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6740 emit_init_rvar (cfg, var->dreg, type);
/* init == FALSE path: dummy definition only, no real code */
6742 emit_dummy_init_rvar (cfg, var->dreg, type);
6749 * Return the cost of inlining CMETHOD.
6752 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6753 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6755 MonoInst *ins, *rvar = NULL;
6756 MonoMethodHeader *cheader;
6757 MonoBasicBlock *ebblock, *sbblock;
6759 MonoMethod *prev_inlined_method;
6760 MonoInst **prev_locals, **prev_args;
6761 MonoType **prev_arg_types;
6762 guint prev_real_offset;
6763 GHashTable *prev_cbb_hash;
6764 MonoBasicBlock **prev_cil_offset_to_bb;
6765 MonoBasicBlock *prev_cbb;
6766 unsigned char* prev_cil_start;
6767 guint32 prev_cil_offset_to_bb_len;
6768 MonoMethod *prev_current_method;
6769 MonoGenericContext *prev_generic_context;
6770 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6772 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6774 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6775 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6778 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6779 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6784 fsig = mono_method_signature (cmethod);
6786 if (cfg->verbose_level > 2)
6787 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once */
6789 if (!cmethod->inline_info) {
6790 cfg->stat_inlineable_methods++;
6791 cmethod->inline_info = 1;
6794 /* allocate local variables */
6795 cheader = mono_method_get_header (cmethod);
6797 if (cheader == NULL || mono_loader_get_last_error ()) {
6798 MonoLoaderError *error = mono_loader_get_last_error ();
6801 mono_metadata_free_mh (cheader);
/* Only propagate the loader error when inlining is mandatory */
6802 if (inline_always && error)
6803 mono_cfg_set_exception (cfg, error->exception_type);
6805 mono_loader_clear_error ();
6809 /* Must verify before creating locals as it can cause the JIT to assert. */
6810 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6811 mono_metadata_free_mh (cheader);
6815 /* allocate space to store the return value */
6816 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6817 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6820 prev_locals = cfg->locals;
6821 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6822 for (i = 0; i < cheader->num_locals; ++i)
6823 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6825 /* allocate start and end blocks */
6826 /* This is needed so if the inline is aborted, we can clean up */
6827 NEW_BBLOCK (cfg, sbblock);
6828 sbblock->real_offset = real_offset;
6830 NEW_BBLOCK (cfg, ebblock);
6831 ebblock->block_num = cfg->num_bblocks++;
6832 ebblock->real_offset = real_offset;
/* Save the per-method compile state which mono_method_to_ir () will clobber */
6834 prev_args = cfg->args;
6835 prev_arg_types = cfg->arg_types;
6836 prev_inlined_method = cfg->inlined_method;
6837 cfg->inlined_method = cmethod;
6838 cfg->ret_var_set = FALSE;
6839 cfg->inline_depth ++;
6840 prev_real_offset = cfg->real_offset;
6841 prev_cbb_hash = cfg->cbb_hash;
6842 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6843 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6844 prev_cil_start = cfg->cil_start;
6845 prev_cbb = cfg->cbb;
6846 prev_current_method = cfg->current_method;
6847 prev_generic_context = cfg->generic_context;
6848 prev_ret_var_set = cfg->ret_var_set;
6849 prev_disable_inline = cfg->disable_inline;
6851 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively translate the callee's IL into IR between sbblock and ebblock */
6854 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6856 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state */
6858 cfg->inlined_method = prev_inlined_method;
6859 cfg->real_offset = prev_real_offset;
6860 cfg->cbb_hash = prev_cbb_hash;
6861 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6862 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6863 cfg->cil_start = prev_cil_start;
6864 cfg->locals = prev_locals;
6865 cfg->args = prev_args;
6866 cfg->arg_types = prev_arg_types;
6867 cfg->current_method = prev_current_method;
6868 cfg->generic_context = prev_generic_context;
6869 cfg->ret_var_set = prev_ret_var_set;
6870 cfg->disable_inline = prev_disable_inline;
6871 cfg->inline_depth --;
/* Accept the inline if it was cheap enough (cost < 60) or mandatory */
6873 if ((costs >= 0 && costs < 60) || inline_always) {
6874 if (cfg->verbose_level > 2)
6875 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6877 cfg->stat_inlined_methods++;
6879 /* always add some code to avoid block split failures */
6880 MONO_INST_NEW (cfg, ins, OP_NOP);
6881 MONO_ADD_INS (prev_cbb, ins);
6883 prev_cbb->next_bb = sbblock;
6884 link_bblock (cfg, prev_cbb, sbblock);
6887 * Get rid of the begin and end bblocks if possible to aid local
6890 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6892 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6893 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6895 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6896 MonoBasicBlock *prev = ebblock->in_bb [0];
6897 mono_merge_basic_blocks (cfg, prev, ebblock);
6899 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6900 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6901 cfg->cbb = prev_cbb;
6905 * It's possible that the rvar is set in some prev bblock, but not in others.
6911 for (i = 0; i < ebblock->in_count; ++i) {
6912 bb = ebblock->in_bb [i];
6914 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6917 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6926 *out_cbb = cfg->cbb;
6930 * If the inlined method contains only a throw, then the ret var is not
6931 * set, so set it to a dummy value.
6934 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6936 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6939 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: roll back and report the cost */
6942 if (cfg->verbose_level > 2)
6943 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6944 cfg->exception_type = MONO_EXCEPTION_NONE;
6945 mono_loader_clear_error ();
6947 /* This gets rid of the newly added bblocks */
6948 cfg->cbb = prev_cbb;
6950 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6955 * Some of these comments may well be out-of-date.
6956 * Design decisions: we do a single pass over the IL code (and we do bblock
6957 * splitting/merging in the few cases when it's required: a back jump to an IL
6958 * address that was not already seen as bblock starting point).
6959 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6960 * Complex operations are decomposed in simpler ones right away. We need to let the
6961 * arch-specific code peek and poke inside this process somehow (except when the
6962 * optimizations can take advantage of the full semantic info of coarse opcodes).
6963 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6964 * MonoInst->opcode initially is the IL opcode or some simplification of that
6965 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6966 * opcode with value bigger than OP_LAST.
6967 * At this point the IR can be handed over to an interpreter, a dumb code generator
6968 * or to the optimizing code generator that will translate it to SSA form.
6970 * Profiling directed optimizations.
6971 * We may compile by default with few or no optimizations and instrument the code
6972 * or the user may indicate what methods to optimize the most either in a config file
6973 * or through repeated runs where the compiler applies offline the optimizations to
6974 * each method and then decides if it was worth it.
6977 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6978 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6979 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6980 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6981 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6982 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6983 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6984 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6986 /* offset from br.s -> br like opcodes */
6987 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock starts at that IL offset.
 */
6990 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6992 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6994 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a basic block at every branch
 * target and at the instruction following each branch. Also marks the bblock
 * containing a 'throw' as out-of-line. POS receives the error position on a
 * malformed opcode.
 */
6998 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7000 unsigned char *ip = start;
7001 unsigned char *target;
7004 MonoBasicBlock *bblock;
7005 const MonoOpcode *opcode;
7008 cli_addr = ip - start;
7009 i = mono_opcode_value ((const guint8 **)&ip, end);
7012 opcode = &mono_opcodes [i];
/* Advance ip past the operand; only branch operands create bblocks */
7013 switch (opcode->argument) {
7014 case MonoInlineNone:
7017 case MonoInlineString:
7018 case MonoInlineType:
7019 case MonoInlineField:
7020 case MonoInlineMethod:
7023 case MonoShortInlineR:
7030 case MonoShortInlineVar:
7031 case MonoShortInlineI:
7034 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the following instruction */
7035 target = start + cli_addr + 2 + (signed char)ip [1];
7036 GET_BBLOCK (cfg, bblock, target);
7039 GET_BBLOCK (cfg, bblock, ip);
7041 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
7042 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7043 GET_BBLOCK (cfg, bblock, target);
7046 GET_BBLOCK (cfg, bblock, ip);
7048 case MonoInlineSwitch: {
7049 guint32 n = read32 (ip + 1);
/* The fall-through target follows the n switch entries */
7052 cli_addr += 5 + 4 * n;
7053 target = start + cli_addr;
7054 GET_BBLOCK (cfg, bblock, target);
7056 for (j = 0; j < n; ++j) {
7057 target = start + cli_addr + (gint32)read32 (ip);
7058 GET_BBLOCK (cfg, bblock, target);
7068 g_assert_not_reached ();
7071 if (i == CEE_THROW) {
7072 unsigned char *bb_start = ip - 1;
7074 /* Find the start of the bblock containing the throw */
7076 while ((bb_start >= start) && !bblock) {
7077 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are moved out of the hot path by the backend */
7081 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types. Wrapper methods resolve through their wrapper data.
 */
7091 static inline MonoMethod *
7092 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7096 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7097 method = mono_method_get_wrapper_data (m, token);
7100 method = mono_class_inflate_generic_method_checked (method, context, &error);
7101 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7104 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but rejects methods on open
 * constructed types when generic sharing is not in effect.
 */
7110 static inline MonoMethod *
7111 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7113 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7115 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT. Wrapper methods resolve through their wrapper data.
 */
7121 static inline MonoClass*
7122 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7127 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7128 klass = mono_method_get_wrapper_data (method, token);
7130 klass = mono_class_inflate_generic_class (klass, context);
7132 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7133 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7136 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD,
 * inflating with CONTEXT. Wrapper methods resolve through their wrapper data.
 */
7140 static inline MonoMethodSignature*
7141 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7143 MonoMethodSignature *fsig;
7145 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7148 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7150 fsig = mono_inflate_generic_signature (fsig, context, &error);
7152 g_assert (mono_error_ok (&error));
7155 fsig = mono_metadata_parse_signature (method->klass->image, token);
7161 * Returns TRUE if the JIT should abort inlining because "callee"
7162 * is influenced by security attributes.
7165 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7169 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
/* Evaluate the declarative security linkdemand for this caller/callee pair */
7173 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7174 if (result == MONO_JIT_SECURITY_OK)
7177 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7178 /* Generate code to throw a SecurityException before the actual call/link */
7179 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7182 NEW_ICONST (cfg, args [0], 4);
7183 NEW_METHODCONST (cfg, args [1], caller);
7184 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7185 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7186 /* don't hide previous results */
7187 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7188 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException method.
 */
7196 throw_exception (void)
7198 static MonoMethod *method = NULL;
7201 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7202 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point.
 */
7209 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7211 MonoMethod *thrower = throw_exception ();
7214 EMIT_NEW_PCONST (cfg, args [0], ex);
7215 mono_emit_method_call (cfg, thrower, args, NULL);
7219 * Return the original method if a wrapper is specified. We can only access
7220 * the custom attributes from the original method.
7223 get_original_method (MonoMethod *method)
7225 if (method->wrapper_type == MONO_WRAPPER_NONE)
7228 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7229 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7232 /* in other cases we need to find the original method */
7233 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: emit a throw if CALLER may not access FIELD.
 */
7237 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7238 MonoBasicBlock *bblock, unsigned char *ip)
7240 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7241 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7243 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: emit a throw if CALLER may not call CALLEE.
 */
7247 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7248 MonoBasicBlock *bblock, unsigned char *ip)
7250 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7251 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7253 emit_throw_exception (cfg, ex);
7257 * Check that the IL instructions at ip are the array initialization
7258 * sequence and return the pointer to the data and the size.
7261 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7264 * newarr[System.Int32]
7266 * ldtoken field valuetype ...
7267 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match the dup/ldtoken/call byte pattern of the initializer sequence */
7269 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7271 guint32 token = read32 (ip + 7);
7272 guint32 field_token = read32 (ip + 2);
7273 guint32 field_index = field_token & 0xffffff;
7275 const char *data_ptr;
7277 MonoMethod *cmethod;
7278 MonoClass *dummy_class;
7279 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7283 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7287 *out_field_token = field_token;
7289 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is RuntimeHelpers.InitializeArray from corlib */
7292 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7294 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7295 case MONO_TYPE_BOOLEAN:
7299 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7300 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7301 case MONO_TYPE_CHAR:
/* The RVA blob must be at least as large as the field's declared size */
7318 if (size > mono_type_size (field->type, &dummy_align))
7321 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7322 if (!image_is_dynamic (method->klass->image)) {
7323 field_index = read32 (ip + 2) & 0xffffff;
7324 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7325 data_ptr = mono_image_rva_map (method->klass->image, rva);
7326 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7327 /* for aot code we do the lookup on load */
7328 if (aot && data_ptr)
7329 return GUINT_TO_POINTER (rva);
7331 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7333 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message describing the
 * offending IL instruction at IP in METHOD.
 */
7341 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7343 char *method_fname = mono_method_full_name (method, TRUE);
7345 MonoMethodHeader *header = mono_method_get_header (method);
7347 if (header->code_size == 0)
7348 method_code = g_strdup ("method body is empty.");
7350 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7351 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7352 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7353 g_free (method_fname);
7354 g_free (method_code);
/* Defer freeing the header until the whole compile is torn down */
7355 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG, GC-rooting it so it survives
 * until the compile finishes.
 */
7359 set_exception_object (MonoCompile *cfg, MonoException *exception)
7361 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7362 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7363 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N, eliding the
 * move entirely when the value is a constant that was just emitted.
 */
7367 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7370 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7371 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7372 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7373 /* Optimize reg-reg moves away */
7375 * Can't optimize other opcodes, since sp[0] might point to
7376 * the last ins of a decomposed opcode.
/* Retarget the constant's destination register at the local directly */
7378 sp [0]->dreg = (cfg)->locals [n]->dreg;
7380 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7385 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases; returns the new ip past the consumed sequence, or NULL if no match */
7388 static inline unsigned char *
7389 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7399 local = read16 (ip + 2);
/* ldloca followed by initobj: initialize the local in place instead */
7403 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7404 /* From the INITOBJ case */
7405 token = read32 (ip + 2);
7406 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7407 CHECK_TYPELOAD (klass);
7408 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7409 emit_init_local (cfg, local, type, TRUE);
/* Walk CLASS's parent chain to check whether it derives from System.Exception */
7417 is_exception_class (MonoClass *class)
7420 if (class == mono_defaults.exception_class)
7422 class = class->parent;
7428 * is_jit_optimizer_disabled:
7430 * Determine whether M's assembly has a DebuggableAttribute with the
7431 * IsJITOptimizerDisabled flag set.
7434 is_jit_optimizer_disabled (MonoMethod *m)
7436 MonoAssembly *ass = m->klass->image->assembly;
7437 MonoCustomAttrInfo* attrs;
7438 static MonoClass *klass;
7440 gboolean val = FALSE;
/* Fast path: result is cached per-assembly */
7443 if (ass->jit_optimizer_disabled_inited)
7444 return ass->jit_optimizer_disabled;
7447 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* The barrier orders the value store before the inited flag store */
7450 ass->jit_optimizer_disabled = FALSE;
7451 mono_memory_barrier ();
7452 ass->jit_optimizer_disabled_inited = TRUE;
7456 attrs = mono_custom_attrs_from_assembly (ass);
7458 for (i = 0; i < attrs->num_attrs; ++i) {
7459 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7462 MonoMethodSignature *sig;
7464 if (!attr->ctor || attr->ctor->klass != klass)
7466 /* Decode the attribute. See reflection.c */
7467 len = attr->data_size;
7468 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7469 g_assert (read16 (p) == 0x0001);
7472 // FIXME: Support named parameters
7473 sig = mono_method_signature (attr->ctor);
7474 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7476 /* Two boolean arguments */
7480 mono_custom_attrs_free (attrs);
7483 ass->jit_optimizer_disabled = val;
7484 mono_memory_barrier ();
7485 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG
 * can actually be performed, checking arch support, argument aliasing of the
 * caller's stack, pinvoke/wrapper status, LMF usage and the call opcode.
 */
7491 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7493 gboolean supported_tail_call;
7496 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7497 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Generic fallback: signatures must match and the return must not be a struct */
7499 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7502 for (i = 0; i < fsig->param_count; ++i) {
7503 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7504 /* These can point to the current method's stack */
7505 supported_tail_call = FALSE;
7507 if (fsig->hasthis && cmethod->klass->valuetype)
7508 /* this might point to the current method's stack */
7509 supported_tail_call = FALSE;
7510 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7511 supported_tail_call = FALSE;
7512 if (cfg->method->save_lmf)
7513 supported_tail_call = FALSE;
7514 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7515 supported_tail_call = FALSE;
7516 if (call_opcode != CEE_CALL)
7517 supported_tail_call = FALSE;
7519 /* Debugging support */
7521 if (supported_tail_call) {
7522 if (!mono_debug_count ())
7523 supported_tail_call = FALSE;
7527 return supported_tail_call;
7530 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7531 * it to the thread local value based on the tls_offset field. Every other kind of access to
7532 * the field causes an assert.
7535 is_magic_tls_access (MonoClassField *field)
/* Must be the 'tlsdata' field of corlib's ThreadLocal`1 */
7537 if (strcmp (field->name, "tlsdata"))
7539 if (strcmp (field->parent->name, "ThreadLocal`1"))
7541 return field->parent->image == mono_defaults.corlib;
7544 /* emits the code needed to access a managed tls var (like ThreadStatic)
7545 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7546 * pointer for the current thread.
7547 * Returns the MonoInst* representing the address of the tls var.
7550 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7553 int static_data_reg, array_reg, dreg;
7554 int offset2_reg, idx_reg;
7555 // inlined access to the tls data
7556 // idx = (offset >> 24) - 1;
7557 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7558 static_data_reg = alloc_ireg (cfg);
7559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scale to a pointer-sized array index */
7560 idx_reg = alloc_ireg (cfg);
7561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7564 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = thread->static_data [idx] */
7565 array_reg = alloc_ireg (cfg);
7566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* final address = array + (offset & 0xffffff) */
7567 offset2_reg = alloc_ireg (cfg);
7568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7569 dreg = alloc_ireg (cfg);
7570 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7575 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7576 * this address is cached per-method in cached_tls_addr.
7579 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7581 MonoInst *load, *addr, *temp, *store, *thread_ins;
7582 MonoClassField *offset_field;
/* Reuse the per-method cached address when we already computed it */
7584 if (*cached_tls_addr) {
7585 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7588 thread_ins = mono_get_thread_intrinsic (cfg);
7589 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls offset stored in the ThreadLocal<T> instance */
7591 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7593 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: call the runtime helper instead */
7595 MonoMethod *thread_method;
7596 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7597 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7599 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7600 addr->klass = mono_class_from_mono_type (tls_field->type);
7601 addr->type = STACK_MP;
/* Cache the computed address in a temp for later accesses in this method */
7602 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7603 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7605 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7612 * Handle calls made to ctors from NEWOBJ opcodes.
7614 * REF_BBLOCK will point to the current bblock after the call.
7617 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7618 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7620 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7621 MonoBasicBlock *bblock = *ref_bblock;
/* Shared generic valuetype ctors need an rgctx/vtable argument */
7623 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7624 mono_method_is_generic_sharable (cmethod, TRUE)) {
7625 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7626 mono_class_vtable (cfg->domain, cmethod->klass);
7627 CHECK_TYPELOAD (cmethod->klass);
7629 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7630 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7633 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7634 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7636 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7638 CHECK_TYPELOAD (cmethod->klass);
7639 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7644 /* Avoid virtual calls to ctors if possible */
7645 if (mono_class_is_marshalbyref (cmethod->klass))
7646 callvirt_this_arg = sp [0];
/* First try an intrinsic implementation of the ctor */
7648 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7649 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7650 CHECK_CFG_EXCEPTION;
/* Then try inlining the ctor body (not for exception subclasses) */
7651 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7652 mono_method_check_inlining (cfg, cmethod) &&
7653 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7656 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7657 cfg->real_offset += 5;
7659 *inline_costs += costs - 5;
7660 *ref_bblock = bblock;
7662 INLINE_FAILURE ("inline failure");
7663 // FIXME-VT: Clean this up
7664 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7665 GSHAREDVT_FAILURE(*ip);
7666 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signatures go through an out trampoline via an indirect call */
7668 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7671 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7672 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7673 } else if (context_used &&
7674 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7675 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7676 MonoInst *cmethod_addr;
7678 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7680 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7681 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7683 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a plain direct (or virtual, for MBR) call to the ctor */
7685 INLINE_FAILURE ("ctor call");
7686 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7687 callvirt_this_arg, NULL, vtable_arg);
7694 * mono_method_to_ir:
7696 * Translate the .net IL into linear IR.
7699 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7700 MonoInst *return_var, MonoInst **inline_args,
7701 guint inline_offset, gboolean is_virtual_call)
7704 MonoInst *ins, **sp, **stack_start;
7705 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7706 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7707 MonoMethod *cmethod, *method_definition;
7708 MonoInst **arg_array;
7709 MonoMethodHeader *header;
7711 guint32 token, ins_flag;
7713 MonoClass *constrained_call = NULL;
7714 unsigned char *ip, *end, *target, *err_pos;
7715 MonoMethodSignature *sig;
7716 MonoGenericContext *generic_context = NULL;
7717 MonoGenericContainer *generic_container = NULL;
7718 MonoType **param_types;
7719 int i, n, start_new_bblock, dreg;
7720 int num_calls = 0, inline_costs = 0;
7721 int breakpoint_id = 0;
7723 MonoBoolean security, pinvoke;
7724 MonoSecurityManager* secman = NULL;
7725 MonoDeclSecurityActions actions;
7726 GSList *class_inits = NULL;
7727 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7729 gboolean init_locals, seq_points, skip_dead_blocks;
7730 gboolean sym_seq_points = FALSE;
7731 MonoInst *cached_tls_addr = NULL;
7732 MonoDebugMethodInfo *minfo;
7733 MonoBitSet *seq_point_locs = NULL;
7734 MonoBitSet *seq_point_set_locs = NULL;
7736 cfg->disable_inline = is_jit_optimizer_disabled (method);
7738 /* serialization and xdomain stuff may need access to private fields and methods */
7739 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7740 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7741 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7742 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7743 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7744 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7746 dont_verify |= mono_security_smcs_hack_enabled ();
7748 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7749 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7750 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7751 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7752 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7754 image = method->klass->image;
7755 header = mono_method_get_header (method);
7757 MonoLoaderError *error;
7759 if ((error = mono_loader_get_last_error ())) {
7760 mono_cfg_set_exception (cfg, error->exception_type);
7762 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7763 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7765 goto exception_exit;
7767 generic_container = mono_method_get_generic_container (method);
7768 sig = mono_method_signature (method);
7769 num_args = sig->hasthis + sig->param_count;
7770 ip = (unsigned char*)header->code;
7771 cfg->cil_start = ip;
7772 end = ip + header->code_size;
7773 cfg->stat_cil_code_size += header->code_size;
7775 seq_points = cfg->gen_seq_points && cfg->method == method;
7776 #ifdef PLATFORM_ANDROID
7777 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7780 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7781 /* We could hit a seq point before attaching to the JIT (#8338) */
7785 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7786 minfo = mono_debug_lookup_method (method);
7788 int i, n_il_offsets;
7792 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7793 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7794 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7795 sym_seq_points = TRUE;
7796 for (i = 0; i < n_il_offsets; ++i) {
7797 if (il_offsets [i] < header->code_size)
7798 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7800 g_free (il_offsets);
7801 g_free (line_numbers);
7802 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7803 /* Methods without line number info like auto-generated property accessors */
7804 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7805 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7806 sym_seq_points = TRUE;
7811 * Methods without init_locals set could cause asserts in various passes
7812 * (#497220). To work around this, we emit dummy initialization opcodes
7813 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7814 * on some platforms.
7816 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7817 init_locals = header->init_locals;
7821 method_definition = method;
7822 while (method_definition->is_inflated) {
7823 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7824 method_definition = imethod->declaring;
7827 /* SkipVerification is not allowed if core-clr is enabled */
7828 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7830 dont_verify_stloc = TRUE;
7833 if (sig->is_inflated)
7834 generic_context = mono_method_get_context (method);
7835 else if (generic_container)
7836 generic_context = &generic_container->context;
7837 cfg->generic_context = generic_context;
7839 if (!cfg->generic_sharing_context)
7840 g_assert (!sig->has_type_parameters);
7842 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7843 g_assert (method->is_inflated);
7844 g_assert (mono_method_get_context (method)->method_inst);
7846 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7847 g_assert (sig->generic_param_count);
7849 if (cfg->method == method) {
7850 cfg->real_offset = 0;
7852 cfg->real_offset = inline_offset;
7855 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7856 cfg->cil_offset_to_bb_len = header->code_size;
7858 cfg->current_method = method;
7860 if (cfg->verbose_level > 2)
7861 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7863 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7865 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7866 for (n = 0; n < sig->param_count; ++n)
7867 param_types [n + sig->hasthis] = sig->params [n];
7868 cfg->arg_types = param_types;
7870 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7871 if (cfg->method == method) {
7873 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7874 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7877 NEW_BBLOCK (cfg, start_bblock);
7878 cfg->bb_entry = start_bblock;
7879 start_bblock->cil_code = NULL;
7880 start_bblock->cil_length = 0;
7881 #if defined(__native_client_codegen__)
7882 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7883 ins->dreg = alloc_dreg (cfg, STACK_I4);
7884 MONO_ADD_INS (start_bblock, ins);
7888 NEW_BBLOCK (cfg, end_bblock);
7889 cfg->bb_exit = end_bblock;
7890 end_bblock->cil_code = NULL;
7891 end_bblock->cil_length = 0;
7892 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7893 g_assert (cfg->num_bblocks == 2);
7895 arg_array = cfg->args;
7897 if (header->num_clauses) {
7898 cfg->spvars = g_hash_table_new (NULL, NULL);
7899 cfg->exvars = g_hash_table_new (NULL, NULL);
7901 /* handle exception clauses */
7902 for (i = 0; i < header->num_clauses; ++i) {
7903 MonoBasicBlock *try_bb;
7904 MonoExceptionClause *clause = &header->clauses [i];
7905 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7906 try_bb->real_offset = clause->try_offset;
7907 try_bb->try_start = TRUE;
7908 try_bb->region = ((i + 1) << 8) | clause->flags;
7909 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7910 tblock->real_offset = clause->handler_offset;
7911 tblock->flags |= BB_EXCEPTION_HANDLER;
7914 * Linking the try block with the EH block hinders inlining as we won't be able to
7915 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7917 if (COMPILE_LLVM (cfg))
7918 link_bblock (cfg, try_bb, tblock);
7920 if (*(ip + clause->handler_offset) == CEE_POP)
7921 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7923 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7924 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7925 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7926 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7927 MONO_ADD_INS (tblock, ins);
7929 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7930 /* finally clauses already have a seq point */
7931 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7932 MONO_ADD_INS (tblock, ins);
7935 /* todo: is a fault block unsafe to optimize? */
7936 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7937 tblock->flags |= BB_EXCEPTION_UNSAFE;
7941 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7943 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7945 /* catch and filter blocks get the exception object on the stack */
7946 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7947 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7948 MonoInst *dummy_use;
7950 /* mostly like handle_stack_args (), but just sets the input args */
7951 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7952 tblock->in_scount = 1;
7953 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7954 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7957 * Add a dummy use for the exvar so its liveness info will be
7961 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7963 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7964 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7965 tblock->flags |= BB_EXCEPTION_HANDLER;
7966 tblock->real_offset = clause->data.filter_offset;
7967 tblock->in_scount = 1;
7968 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7969 /* The filter block shares the exvar with the handler block */
7970 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7971 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7972 MONO_ADD_INS (tblock, ins);
7976 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7977 clause->data.catch_class &&
7978 cfg->generic_sharing_context &&
7979 mono_class_check_context_used (clause->data.catch_class)) {
7981 * In shared generic code with catch
7982 * clauses containing type variables
7983 * the exception handling code has to
7984 * be able to get to the rgctx.
7985 * Therefore we have to make sure that
7986 * the vtable/mrgctx argument (for
7987 * static or generic methods) or the
7988 * "this" argument (for non-static
7989 * methods) are live.
7991 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7992 mini_method_get_context (method)->method_inst ||
7993 method->klass->valuetype) {
7994 mono_get_vtable_var (cfg);
7996 MonoInst *dummy_use;
7998 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8003 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8004 cfg->cbb = start_bblock;
8005 cfg->args = arg_array;
8006 mono_save_args (cfg, sig, inline_args);
8009 /* FIRST CODE BLOCK */
8010 NEW_BBLOCK (cfg, bblock);
8011 bblock->cil_code = ip;
8015 ADD_BBLOCK (cfg, bblock);
8017 if (cfg->method == method) {
8018 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8019 if (breakpoint_id) {
8020 MONO_INST_NEW (cfg, ins, OP_BREAK);
8021 MONO_ADD_INS (bblock, ins);
8025 if (mono_security_cas_enabled ())
8026 secman = mono_security_manager_get_methods ();
8028 security = (secman && mono_security_method_has_declsec (method));
8029 /* at this point having security doesn't mean we have any code to generate */
8030 if (security && (cfg->method == method)) {
8031 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8032 * And we do not want to enter the next section (with allocation) if we
8033 * have nothing to generate */
8034 security = mono_declsec_get_demands (method, &actions);
8037 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8038 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8040 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8041 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8042 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8044 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8045 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8049 mono_custom_attrs_free (custom);
8052 custom = mono_custom_attrs_from_class (wrapped->klass);
8053 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8057 mono_custom_attrs_free (custom);
8060 /* not a P/Invoke after all */
8065 /* we use a separate basic block for the initialization code */
8066 NEW_BBLOCK (cfg, init_localsbb);
8067 cfg->bb_init = init_localsbb;
8068 init_localsbb->real_offset = cfg->real_offset;
8069 start_bblock->next_bb = init_localsbb;
8070 init_localsbb->next_bb = bblock;
8071 link_bblock (cfg, start_bblock, init_localsbb);
8072 link_bblock (cfg, init_localsbb, bblock);
8074 cfg->cbb = init_localsbb;
8076 if (cfg->gsharedvt && cfg->method == method) {
8077 MonoGSharedVtMethodInfo *info;
8078 MonoInst *var, *locals_var;
8081 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8082 info->method = cfg->method;
8083 info->count_entries = 16;
8084 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8085 cfg->gsharedvt_info = info;
8087 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8088 /* prevent it from being register allocated */
8089 //var->flags |= MONO_INST_VOLATILE;
8090 cfg->gsharedvt_info_var = var;
8092 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8095 /* Allocate locals */
8096 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8097 /* prevent it from being register allocated */
8098 //locals_var->flags |= MONO_INST_VOLATILE;
8099 cfg->gsharedvt_locals_var = locals_var;
8101 dreg = alloc_ireg (cfg);
8102 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8104 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8105 ins->dreg = locals_var->dreg;
8107 MONO_ADD_INS (cfg->cbb, ins);
8108 cfg->gsharedvt_locals_var_ins = ins;
8110 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8113 ins->flags |= MONO_INST_INIT;
8117 /* at this point we know, if security is TRUE, that some code needs to be generated */
8118 if (security && (cfg->method == method)) {
8121 cfg->stat_cas_demand_generation++;
8123 if (actions.demand.blob) {
8124 /* Add code for SecurityAction.Demand */
8125 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8126 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8127 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8128 mono_emit_method_call (cfg, secman->demand, args, NULL);
8130 if (actions.noncasdemand.blob) {
8131 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8132 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8133 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8134 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8135 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8136 mono_emit_method_call (cfg, secman->demand, args, NULL);
8138 if (actions.demandchoice.blob) {
8139 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8140 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8141 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8142 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8143 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8147 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8149 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8152 if (mono_security_core_clr_enabled ()) {
8153 /* check if this is native code, e.g. an icall or a p/invoke */
8154 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8155 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8157 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8158 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8160 /* if this ia a native call then it can only be JITted from platform code */
8161 if ((icall || pinvk) && method->klass && method->klass->image) {
8162 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8163 MonoException *ex = icall ? mono_get_exception_security () :
8164 mono_get_exception_method_access ();
8165 emit_throw_exception (cfg, ex);
8172 CHECK_CFG_EXCEPTION;
8174 if (header->code_size == 0)
8177 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8182 if (cfg->method == method)
8183 mono_debug_init_method (cfg, bblock, breakpoint_id);
8185 for (n = 0; n < header->num_locals; ++n) {
8186 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8191 /* We force the vtable variable here for all shared methods
8192 for the possibility that they might show up in a stack
8193 trace where their exact instantiation is needed. */
8194 if (cfg->generic_sharing_context && method == cfg->method) {
8195 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8196 mini_method_get_context (method)->method_inst ||
8197 method->klass->valuetype) {
8198 mono_get_vtable_var (cfg);
8200 /* FIXME: Is there a better way to do this?
8201 We need the variable live for the duration
8202 of the whole method. */
8203 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8207 /* add a check for this != NULL to inlined methods */
8208 if (is_virtual_call) {
8211 NEW_ARGLOAD (cfg, arg_ins, 0);
8212 MONO_ADD_INS (cfg->cbb, arg_ins);
8213 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8216 skip_dead_blocks = !dont_verify;
8217 if (skip_dead_blocks) {
8218 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8223 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8224 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8227 start_new_bblock = 0;
8230 if (cfg->method == method)
8231 cfg->real_offset = ip - header->code;
8233 cfg->real_offset = inline_offset;
8238 if (start_new_bblock) {
8239 bblock->cil_length = ip - bblock->cil_code;
8240 if (start_new_bblock == 2) {
8241 g_assert (ip == tblock->cil_code);
8243 GET_BBLOCK (cfg, tblock, ip);
8245 bblock->next_bb = tblock;
8248 start_new_bblock = 0;
8249 for (i = 0; i < bblock->in_scount; ++i) {
8250 if (cfg->verbose_level > 3)
8251 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8252 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8256 g_slist_free (class_inits);
8259 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8260 link_bblock (cfg, bblock, tblock);
8261 if (sp != stack_start) {
8262 handle_stack_args (cfg, stack_start, sp - stack_start);
8264 CHECK_UNVERIFIABLE (cfg);
8266 bblock->next_bb = tblock;
8269 for (i = 0; i < bblock->in_scount; ++i) {
8270 if (cfg->verbose_level > 3)
8271 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8272 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8275 g_slist_free (class_inits);
8280 if (skip_dead_blocks) {
8281 int ip_offset = ip - header->code;
8283 if (ip_offset == bb->end)
8287 int op_size = mono_opcode_size (ip, end);
8288 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8290 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8292 if (ip_offset + op_size == bb->end) {
8293 MONO_INST_NEW (cfg, ins, OP_NOP);
8294 MONO_ADD_INS (bblock, ins);
8295 start_new_bblock = 1;
8303 * Sequence points are points where the debugger can place a breakpoint.
8304 * Currently, we generate these automatically at points where the IL
8307 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8309 * Make methods interruptable at the beginning, and at the targets of
8310 * backward branches.
8311 * Also, do this at the start of every bblock in methods with clauses too,
8312 * to be able to handle instructions with inprecise control flow like
8314 * Backward branches are handled at the end of method-to-ir ().
8316 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8318 /* Avoid sequence points on empty IL like .volatile */
8319 // FIXME: Enable this
8320 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8321 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8322 if (sp != stack_start)
8323 ins->flags |= MONO_INST_NONEMPTY_STACK;
8324 MONO_ADD_INS (cfg->cbb, ins);
8327 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8330 bblock->real_offset = cfg->real_offset;
8332 if ((cfg->method == method) && cfg->coverage_info) {
8333 guint32 cil_offset = ip - header->code;
8334 cfg->coverage_info->data [cil_offset].cil_code = ip;
8336 /* TODO: Use an increment here */
8337 #if defined(TARGET_X86)
8338 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8339 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8341 MONO_ADD_INS (cfg->cbb, ins);
8343 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8344 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8348 if (cfg->verbose_level > 3)
8349 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8353 if (seq_points && !sym_seq_points && sp != stack_start) {
8355 * The C# compiler uses these nops to notify the JIT that it should
8356 * insert seq points.
8358 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8359 MONO_ADD_INS (cfg->cbb, ins);
8361 if (cfg->keep_cil_nops)
8362 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8364 MONO_INST_NEW (cfg, ins, OP_NOP);
8366 MONO_ADD_INS (bblock, ins);
8369 if (should_insert_brekpoint (cfg->method)) {
8370 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8372 MONO_INST_NEW (cfg, ins, OP_NOP);
8375 MONO_ADD_INS (bblock, ins);
8381 CHECK_STACK_OVF (1);
8382 n = (*ip)-CEE_LDARG_0;
8384 EMIT_NEW_ARGLOAD (cfg, ins, n);
8392 CHECK_STACK_OVF (1);
8393 n = (*ip)-CEE_LDLOC_0;
8395 EMIT_NEW_LOCLOAD (cfg, ins, n);
8404 n = (*ip)-CEE_STLOC_0;
8407 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8409 emit_stloc_ir (cfg, sp, header, n);
8416 CHECK_STACK_OVF (1);
8419 EMIT_NEW_ARGLOAD (cfg, ins, n);
8425 CHECK_STACK_OVF (1);
8428 NEW_ARGLOADA (cfg, ins, n);
8429 MONO_ADD_INS (cfg->cbb, ins);
8439 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8441 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8446 CHECK_STACK_OVF (1);
8449 EMIT_NEW_LOCLOAD (cfg, ins, n);
8453 case CEE_LDLOCA_S: {
8454 unsigned char *tmp_ip;
8456 CHECK_STACK_OVF (1);
8457 CHECK_LOCAL (ip [1]);
8459 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8465 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8474 CHECK_LOCAL (ip [1]);
8475 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8477 emit_stloc_ir (cfg, sp, header, ip [1]);
8482 CHECK_STACK_OVF (1);
8483 EMIT_NEW_PCONST (cfg, ins, NULL);
8484 ins->type = STACK_OBJ;
8489 CHECK_STACK_OVF (1);
8490 EMIT_NEW_ICONST (cfg, ins, -1);
8503 CHECK_STACK_OVF (1);
8504 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8510 CHECK_STACK_OVF (1);
8512 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8518 CHECK_STACK_OVF (1);
8519 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8525 CHECK_STACK_OVF (1);
8526 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8527 ins->type = STACK_I8;
8528 ins->dreg = alloc_dreg (cfg, STACK_I8);
8530 ins->inst_l = (gint64)read64 (ip);
8531 MONO_ADD_INS (bblock, ins);
8537 gboolean use_aotconst = FALSE;
8539 #ifdef TARGET_POWERPC
8540 /* FIXME: Clean this up */
8541 if (cfg->compile_aot)
8542 use_aotconst = TRUE;
8545 /* FIXME: we should really allocate this only late in the compilation process */
8546 f = mono_domain_alloc (cfg->domain, sizeof (float));
8548 CHECK_STACK_OVF (1);
8554 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8556 dreg = alloc_freg (cfg);
8557 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8558 ins->type = cfg->r4_stack_type;
8560 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8561 ins->type = cfg->r4_stack_type;
8562 ins->dreg = alloc_dreg (cfg, STACK_R8);
8564 MONO_ADD_INS (bblock, ins);
8574 gboolean use_aotconst = FALSE;
8576 #ifdef TARGET_POWERPC
8577 /* FIXME: Clean this up */
8578 if (cfg->compile_aot)
8579 use_aotconst = TRUE;
8582 /* FIXME: we should really allocate this only late in the compilation process */
8583 d = mono_domain_alloc (cfg->domain, sizeof (double));
8585 CHECK_STACK_OVF (1);
8591 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8593 dreg = alloc_freg (cfg);
8594 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8595 ins->type = STACK_R8;
8597 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8598 ins->type = STACK_R8;
8599 ins->dreg = alloc_dreg (cfg, STACK_R8);
8601 MONO_ADD_INS (bblock, ins);
8610 MonoInst *temp, *store;
8612 CHECK_STACK_OVF (1);
8616 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8617 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8619 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8622 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8635 if (sp [0]->type == STACK_R8)
8636 /* we need to pop the value from the x86 FP stack */
8637 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8643 INLINE_FAILURE ("jmp");
8644 GSHAREDVT_FAILURE (*ip);
8647 if (stack_start != sp)
8649 token = read32 (ip + 1);
8650 /* FIXME: check the signature matches */
8651 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8653 if (!cmethod || mono_loader_get_last_error ())
8656 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8657 GENERIC_SHARING_FAILURE (CEE_JMP);
8659 if (mono_security_cas_enabled ())
8660 CHECK_CFG_EXCEPTION;
8662 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8664 if (ARCH_HAVE_OP_TAIL_CALL) {
8665 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8668 /* Handle tail calls similarly to calls */
8669 n = fsig->param_count + fsig->hasthis;
8673 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8674 call->method = cmethod;
8675 call->tail_call = TRUE;
8676 call->signature = mono_method_signature (cmethod);
8677 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8678 call->inst.inst_p0 = cmethod;
8679 for (i = 0; i < n; ++i)
8680 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8682 mono_arch_emit_call (cfg, call);
8683 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8684 MONO_ADD_INS (bblock, (MonoInst*)call);
8686 for (i = 0; i < num_args; ++i)
8687 /* Prevent arguments from being optimized away */
8688 arg_array [i]->flags |= MONO_INST_VOLATILE;
8690 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8691 ins = (MonoInst*)call;
8692 ins->inst_p0 = cmethod;
8693 MONO_ADD_INS (bblock, ins);
8697 start_new_bblock = 1;
8702 case CEE_CALLVIRT: {
8703 MonoInst *addr = NULL;
8704 MonoMethodSignature *fsig = NULL;
8706 int virtual = *ip == CEE_CALLVIRT;
8707 int calli = *ip == CEE_CALLI;
8708 gboolean pass_imt_from_rgctx = FALSE;
8709 MonoInst *imt_arg = NULL;
8710 MonoInst *keep_this_alive = NULL;
8711 gboolean pass_vtable = FALSE;
8712 gboolean pass_mrgctx = FALSE;
8713 MonoInst *vtable_arg = NULL;
8714 gboolean check_this = FALSE;
8715 gboolean supported_tail_call = FALSE;
8716 gboolean tail_call = FALSE;
8717 gboolean need_seq_point = FALSE;
8718 guint32 call_opcode = *ip;
8719 gboolean emit_widen = TRUE;
8720 gboolean push_res = TRUE;
8721 gboolean skip_ret = FALSE;
8722 gboolean delegate_invoke = FALSE;
8725 token = read32 (ip + 1);
8730 //GSHAREDVT_FAILURE (*ip);
8735 fsig = mini_get_signature (method, token, generic_context);
8736 n = fsig->param_count + fsig->hasthis;
8738 if (method->dynamic && fsig->pinvoke) {
8742 * This is a call through a function pointer using a pinvoke
8743 * signature. Have to create a wrapper and call that instead.
8744 * FIXME: This is very slow, need to create a wrapper at JIT time
8745 * instead based on the signature.
8747 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8748 EMIT_NEW_PCONST (cfg, args [1], fsig);
8750 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8753 MonoMethod *cil_method;
8755 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8756 cil_method = cmethod;
8758 if (constrained_call) {
8759 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8760 if (cfg->verbose_level > 2)
8761 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8762 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8763 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8764 cfg->generic_sharing_context)) {
8765 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8769 if (cfg->verbose_level > 2)
8770 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8772 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8774 * This is needed since get_method_constrained can't find
8775 * the method in klass representing a type var.
8776 * The type var is guaranteed to be a reference type in this
8779 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8780 g_assert (!cmethod->klass->valuetype);
8782 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8788 if (!cmethod || mono_loader_get_last_error ())
8790 if (!dont_verify && !cfg->skip_visibility) {
8791 MonoMethod *target_method = cil_method;
8792 if (method->is_inflated) {
8793 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8795 if (!mono_method_can_access_method (method_definition, target_method) &&
8796 !mono_method_can_access_method (method, cil_method))
8797 METHOD_ACCESS_FAILURE (method, cil_method);
8800 if (mono_security_core_clr_enabled ())
8801 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8803 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8804 /* MS.NET seems to silently convert this to a callvirt */
8809 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8810 * converts to a callvirt.
8812 * tests/bug-515884.il is an example of this behavior
8814 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8815 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8816 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8820 if (!cmethod->klass->inited)
8821 if (!mono_class_init (cmethod->klass))
8822 TYPE_LOAD_ERROR (cmethod->klass);
8824 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8825 mini_class_is_system_array (cmethod->klass)) {
8826 array_rank = cmethod->klass->rank;
8827 fsig = mono_method_signature (cmethod);
8829 fsig = mono_method_signature (cmethod);
8834 if (fsig->pinvoke) {
8835 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8836 check_for_pending_exc, cfg->compile_aot);
8837 fsig = mono_method_signature (wrapper);
8838 } else if (constrained_call) {
8839 fsig = mono_method_signature (cmethod);
8841 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8846 mono_save_token_info (cfg, image, token, cil_method);
8848 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8849 need_seq_point = TRUE;
8851 n = fsig->param_count + fsig->hasthis;
8853 /* Don't support calls made using type arguments for now */
8855 if (cfg->gsharedvt) {
8856 if (mini_is_gsharedvt_signature (cfg, fsig))
8857 GSHAREDVT_FAILURE (*ip);
8861 if (mono_security_cas_enabled ()) {
8862 if (check_linkdemand (cfg, method, cmethod))
8863 INLINE_FAILURE ("linkdemand");
8864 CHECK_CFG_EXCEPTION;
8867 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8868 g_assert_not_reached ();
8871 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8874 if (!cfg->generic_sharing_context && cmethod)
8875 g_assert (!mono_method_check_context_used (cmethod));
8879 //g_assert (!virtual || fsig->hasthis);
8883 if (constrained_call) {
8884 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8885 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8886 /* The 'Own method' case below */
8887 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8888 /* 'The type parameter is instantiated as a reference type' case below. */
8890 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_call, &emit_widen, &bblock);
8891 CHECK_CFG_EXCEPTION;
8898 * We have the `constrained.' prefix opcode.
8900 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8902 * The type parameter is instantiated as a valuetype,
8903 * but that type doesn't override the method we're
8904 * calling, so we need to box `this'.
8906 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8907 ins->klass = constrained_call;
8908 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8909 CHECK_CFG_EXCEPTION;
8910 } else if (!constrained_call->valuetype) {
8911 int dreg = alloc_ireg_ref (cfg);
8914 * The type parameter is instantiated as a reference
8915 * type. We have a managed pointer on the stack, so
8916 * we need to dereference it here.
8918 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8919 ins->type = STACK_OBJ;
8922 if (cmethod->klass->valuetype) {
8925 /* Interface method */
8928 mono_class_setup_vtable (constrained_call);
8929 CHECK_TYPELOAD (constrained_call);
8930 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8932 TYPE_LOAD_ERROR (constrained_call);
8933 slot = mono_method_get_vtable_slot (cmethod);
8935 TYPE_LOAD_ERROR (cmethod->klass);
8936 cmethod = constrained_call->vtable [ioffset + slot];
8938 if (cmethod->klass == mono_defaults.enum_class) {
8939 /* Enum implements some interfaces, so treat this as the first case */
8940 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8941 ins->klass = constrained_call;
8942 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8943 CHECK_CFG_EXCEPTION;
8948 constrained_call = NULL;
8951 if (!calli && check_call_signature (cfg, fsig, sp))
8954 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8955 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8956 delegate_invoke = TRUE;
8959 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8961 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8962 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8970 * If the callee is a shared method, then its static cctor
8971 * might not get called after the call was patched.
8973 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8974 emit_generic_class_init (cfg, cmethod->klass);
8975 CHECK_TYPELOAD (cmethod->klass);
8979 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8981 if (cfg->generic_sharing_context && cmethod) {
8982 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8984 context_used = mini_method_check_context_used (cfg, cmethod);
8986 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8987 /* Generic method interface
8988 calls are resolved via a
8989 helper function and don't
8991 if (!cmethod_context || !cmethod_context->method_inst)
8992 pass_imt_from_rgctx = TRUE;
8996 * If a shared method calls another
8997 * shared method then the caller must
8998 * have a generic sharing context
8999 * because the magic trampoline
9000 * requires it. FIXME: We shouldn't
9001 * have to force the vtable/mrgctx
9002 * variable here. Instead there
9003 * should be a flag in the cfg to
9004 * request a generic sharing context.
9007 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9008 mono_get_vtable_var (cfg);
9013 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9015 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9017 CHECK_TYPELOAD (cmethod->klass);
9018 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9023 g_assert (!vtable_arg);
9025 if (!cfg->compile_aot) {
9027 * emit_get_rgctx_method () calls mono_class_vtable () so check
9028 * for type load errors before.
9030 mono_class_setup_vtable (cmethod->klass);
9031 CHECK_TYPELOAD (cmethod->klass);
9034 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9036 /* !marshalbyref is needed to properly handle generic methods + remoting */
9037 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9038 MONO_METHOD_IS_FINAL (cmethod)) &&
9039 !mono_class_is_marshalbyref (cmethod->klass)) {
9046 if (pass_imt_from_rgctx) {
9047 g_assert (!pass_vtable);
9050 imt_arg = emit_get_rgctx_method (cfg, context_used,
9051 cmethod, MONO_RGCTX_INFO_METHOD);
9055 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9057 /* Calling virtual generic methods */
9058 if (cmethod && virtual &&
9059 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9060 !(MONO_METHOD_IS_FINAL (cmethod) &&
9061 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9062 fsig->generic_param_count &&
9063 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9064 MonoInst *this_temp, *this_arg_temp, *store;
9065 MonoInst *iargs [4];
9066 gboolean use_imt = FALSE;
9068 g_assert (fsig->is_inflated);
9070 /* Prevent inlining of methods that contain indirect calls */
9071 INLINE_FAILURE ("virtual generic call");
9073 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9074 GSHAREDVT_FAILURE (*ip);
9076 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9077 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9082 g_assert (!imt_arg);
9084 g_assert (cmethod->is_inflated);
9085 imt_arg = emit_get_rgctx_method (cfg, context_used,
9086 cmethod, MONO_RGCTX_INFO_METHOD);
9087 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9089 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9090 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9091 MONO_ADD_INS (bblock, store);
9093 /* FIXME: This should be a managed pointer */
9094 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9096 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9097 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9098 cmethod, MONO_RGCTX_INFO_METHOD);
9099 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9100 addr = mono_emit_jit_icall (cfg,
9101 mono_helper_compile_generic_method, iargs);
9103 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9105 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9112 * Implement a workaround for the inherent races involved in locking:
9118 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9119 * try block, the Exit () won't be executed, see:
9120 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9121 * To work around this, we extend such try blocks to include the last x bytes
9122 * of the Monitor.Enter () call.
9124 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9125 MonoBasicBlock *tbb;
9127 GET_BBLOCK (cfg, tbb, ip + 5);
9129 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9130 * from Monitor.Enter like ArgumentNullException.
9132 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9133 /* Mark this bblock as needing to be extended */
9134 tbb->extend_try_block = TRUE;
9138 /* Conversion to a JIT intrinsic */
9139 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9141 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9142 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9149 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
9150 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9151 mono_method_check_inlining (cfg, cmethod)) {
9153 gboolean always = FALSE;
9155 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9156 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9157 /* Prevent inlining of methods that call wrappers */
9158 INLINE_FAILURE ("wrapper call");
9159 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9163 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9165 cfg->real_offset += 5;
9167 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9168 /* *sp is already set by inline_method */
9173 inline_costs += costs;
9179 /* Tail recursion elimination */
9180 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9181 gboolean has_vtargs = FALSE;
9184 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9185 INLINE_FAILURE ("tail call");
9187 /* keep it simple */
9188 for (i = fsig->param_count - 1; i >= 0; i--) {
9189 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9194 for (i = 0; i < n; ++i)
9195 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9196 MONO_INST_NEW (cfg, ins, OP_BR);
9197 MONO_ADD_INS (bblock, ins);
9198 tblock = start_bblock->out_bb [0];
9199 link_bblock (cfg, bblock, tblock);
9200 ins->inst_target_bb = tblock;
9201 start_new_bblock = 1;
9203 /* skip the CEE_RET, too */
9204 if (ip_in_bb (cfg, bblock, ip + 5))
9211 inline_costs += 10 * num_calls++;
9214 * Making generic calls out of gsharedvt methods.
9215 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9216 * patching gshared method addresses into a gsharedvt method.
9218 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9219 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9220 MonoRgctxInfoType info_type;
9223 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9224 //GSHAREDVT_FAILURE (*ip);
9225 // disable for possible remoting calls
9226 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9227 GSHAREDVT_FAILURE (*ip);
9228 if (fsig->generic_param_count) {
9229 /* virtual generic call */
9230 g_assert (mono_use_imt);
9231 g_assert (!imt_arg);
9232 /* Same as the virtual generic case above */
9233 imt_arg = emit_get_rgctx_method (cfg, context_used,
9234 cmethod, MONO_RGCTX_INFO_METHOD);
9235 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9237 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9238 /* This can happen when we call a fully instantiated iface method */
9239 imt_arg = emit_get_rgctx_method (cfg, context_used,
9240 cmethod, MONO_RGCTX_INFO_METHOD);
9245 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9246 keep_this_alive = sp [0];
9248 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9249 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9251 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9252 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9254 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9256 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9258 * We pass the address to the gsharedvt trampoline in the rgctx reg
9260 MonoInst *callee = addr;
9262 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9264 GSHAREDVT_FAILURE (*ip);
9266 addr = emit_get_rgctx_sig (cfg, context_used,
9267 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9268 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9272 /* Generic sharing */
9275 * Use this if the callee is gsharedvt sharable too, since
9276 * at runtime we might find an instantiation so the call cannot
9277 * be patched (the 'no_patch' code path in mini-trampolines.c).
9279 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9280 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9281 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9282 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9283 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9284 INLINE_FAILURE ("gshared");
9286 g_assert (cfg->generic_sharing_context && cmethod);
9290 * We are compiling a call to a
9291 * generic method from shared code,
9292 * which means that we have to look up
9293 * the method in the rgctx and do an
9297 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9299 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9300 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9304 /* Indirect calls */
9306 if (call_opcode == CEE_CALL)
9307 g_assert (context_used);
9308 else if (call_opcode == CEE_CALLI)
9309 g_assert (!vtable_arg);
9311 /* FIXME: what the hell is this??? */
9312 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9313 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9315 /* Prevent inlining of methods with indirect calls */
9316 INLINE_FAILURE ("indirect call");
9318 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9323 * Instead of emitting an indirect call, emit a direct call
9324 * with the contents of the aotconst as the patch info.
9326 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9327 info_type = addr->inst_c1;
9328 info_data = addr->inst_p0;
9330 info_type = addr->inst_right->inst_c1;
9331 info_data = addr->inst_right->inst_left;
9334 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9335 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9340 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9348 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9349 MonoInst *val = sp [fsig->param_count];
9351 if (val->type == STACK_OBJ) {
9352 MonoInst *iargs [2];
9357 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9360 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9361 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9362 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9363 emit_write_barrier (cfg, addr, val);
9364 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9365 GSHAREDVT_FAILURE (*ip);
9366 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9367 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9369 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9370 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9371 if (!cmethod->klass->element_class->valuetype && !readonly)
9372 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9373 CHECK_TYPELOAD (cmethod->klass);
9376 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9379 g_assert_not_reached ();
9386 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9390 /* Tail prefix / tail call optimization */
9392 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9393 /* FIXME: runtime generic context pointer for jumps? */
9394 /* FIXME: handle this for generic sharing eventually */
9395 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9396 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9397 supported_tail_call = TRUE;
9399 if (supported_tail_call) {
9402 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9403 INLINE_FAILURE ("tail call");
9405 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9407 if (ARCH_HAVE_OP_TAIL_CALL) {
9408 /* Handle tail calls similarly to normal calls */
9411 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9413 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9414 call->tail_call = TRUE;
9415 call->method = cmethod;
9416 call->signature = mono_method_signature (cmethod);
9419 * We implement tail calls by storing the actual arguments into the
9420 * argument variables, then emitting a CEE_JMP.
9422 for (i = 0; i < n; ++i) {
9423 /* Prevent argument from being register allocated */
9424 arg_array [i]->flags |= MONO_INST_VOLATILE;
9425 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9427 ins = (MonoInst*)call;
9428 ins->inst_p0 = cmethod;
9429 ins->inst_p1 = arg_array [0];
9430 MONO_ADD_INS (bblock, ins);
9431 link_bblock (cfg, bblock, end_bblock);
9432 start_new_bblock = 1;
9434 // FIXME: Eliminate unreachable epilogs
9437 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9438 * only reachable from this call.
9440 GET_BBLOCK (cfg, tblock, ip + 5);
9441 if (tblock == bblock || tblock->in_count == 0)
9450 * Synchronized wrappers.
9451 * Its hard to determine where to replace a method with its synchronized
9452 * wrapper without causing an infinite recursion. The current solution is
9453 * to add the synchronized wrapper in the trampolines, and to
9454 * change the called method to a dummy wrapper, and resolve that wrapper
9455 * to the real method in mono_jit_compile_method ().
9457 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9458 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9459 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9460 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9464 INLINE_FAILURE ("call");
9465 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9466 imt_arg, vtable_arg);
9469 link_bblock (cfg, bblock, end_bblock);
9470 start_new_bblock = 1;
9472 // FIXME: Eliminate unreachable epilogs
9475 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9476 * only reachable from this call.
9478 GET_BBLOCK (cfg, tblock, ip + 5);
9479 if (tblock == bblock || tblock->in_count == 0)
9486 /* End of call, INS should contain the result of the call, if any */
9488 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9491 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9496 if (keep_this_alive) {
9497 MonoInst *dummy_use;
9499 /* See mono_emit_method_call_full () */
9500 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9503 CHECK_CFG_EXCEPTION;
9507 g_assert (*ip == CEE_RET);
9511 constrained_call = NULL;
9513 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9517 if (cfg->method != method) {
9518 /* return from inlined method */
9520 * If in_count == 0, that means the ret is unreachable due to
9521 * being preceeded by a throw. In that case, inline_method () will
9522 * handle setting the return value
9523 * (test case: test_0_inline_throw ()).
9525 if (return_var && cfg->cbb->in_count) {
9526 MonoType *ret_type = mono_method_signature (method)->ret;
9532 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9535 //g_assert (returnvar != -1);
9536 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9537 cfg->ret_var_set = TRUE;
9540 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9542 if (cfg->lmf_var && cfg->cbb->in_count)
9546 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9548 if (seq_points && !sym_seq_points) {
9550 * Place a seq point here too even through the IL stack is not
9551 * empty, so a step over on
9554 * will work correctly.
9556 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9557 MONO_ADD_INS (cfg->cbb, ins);
9560 g_assert (!return_var);
9564 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9567 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9570 if (!cfg->vret_addr) {
9573 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9575 EMIT_NEW_RETLOADA (cfg, ret_addr);
9577 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9578 ins->klass = mono_class_from_mono_type (ret_type);
9581 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9582 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9583 MonoInst *iargs [1];
9587 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9588 mono_arch_emit_setret (cfg, method, conv);
9590 mono_arch_emit_setret (cfg, method, *sp);
9593 mono_arch_emit_setret (cfg, method, *sp);
9598 if (sp != stack_start)
9600 MONO_INST_NEW (cfg, ins, OP_BR);
9602 ins->inst_target_bb = end_bblock;
9603 MONO_ADD_INS (bblock, ins);
9604 link_bblock (cfg, bblock, end_bblock);
9605 start_new_bblock = 1;
9609 MONO_INST_NEW (cfg, ins, OP_BR);
9611 target = ip + 1 + (signed char)(*ip);
9613 GET_BBLOCK (cfg, tblock, target);
9614 link_bblock (cfg, bblock, tblock);
9615 ins->inst_target_bb = tblock;
9616 if (sp != stack_start) {
9617 handle_stack_args (cfg, stack_start, sp - stack_start);
9619 CHECK_UNVERIFIABLE (cfg);
9621 MONO_ADD_INS (bblock, ins);
9622 start_new_bblock = 1;
9623 inline_costs += BRANCH_COST;
9637 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9639 target = ip + 1 + *(signed char*)ip;
9645 inline_costs += BRANCH_COST;
9649 MONO_INST_NEW (cfg, ins, OP_BR);
9652 target = ip + 4 + (gint32)read32(ip);
9654 GET_BBLOCK (cfg, tblock, target);
9655 link_bblock (cfg, bblock, tblock);
9656 ins->inst_target_bb = tblock;
9657 if (sp != stack_start) {
9658 handle_stack_args (cfg, stack_start, sp - stack_start);
9660 CHECK_UNVERIFIABLE (cfg);
9663 MONO_ADD_INS (bblock, ins);
9665 start_new_bblock = 1;
9666 inline_costs += BRANCH_COST;
9673 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9674 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9675 guint32 opsize = is_short ? 1 : 4;
9677 CHECK_OPSIZE (opsize);
9679 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9682 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9687 GET_BBLOCK (cfg, tblock, target);
9688 link_bblock (cfg, bblock, tblock);
9689 GET_BBLOCK (cfg, tblock, ip);
9690 link_bblock (cfg, bblock, tblock);
9692 if (sp != stack_start) {
9693 handle_stack_args (cfg, stack_start, sp - stack_start);
9694 CHECK_UNVERIFIABLE (cfg);
9697 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9698 cmp->sreg1 = sp [0]->dreg;
9699 type_from_op (cfg, cmp, sp [0], NULL);
9702 #if SIZEOF_REGISTER == 4
9703 if (cmp->opcode == OP_LCOMPARE_IMM) {
9704 /* Convert it to OP_LCOMPARE */
9705 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9706 ins->type = STACK_I8;
9707 ins->dreg = alloc_dreg (cfg, STACK_I8);
9709 MONO_ADD_INS (bblock, ins);
9710 cmp->opcode = OP_LCOMPARE;
9711 cmp->sreg2 = ins->dreg;
9714 MONO_ADD_INS (bblock, cmp);
9716 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9717 type_from_op (cfg, ins, sp [0], NULL);
9718 MONO_ADD_INS (bblock, ins);
9719 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9720 GET_BBLOCK (cfg, tblock, target);
9721 ins->inst_true_bb = tblock;
9722 GET_BBLOCK (cfg, tblock, ip);
9723 ins->inst_false_bb = tblock;
9724 start_new_bblock = 2;
9727 inline_costs += BRANCH_COST;
9742 MONO_INST_NEW (cfg, ins, *ip);
9744 target = ip + 4 + (gint32)read32(ip);
9750 inline_costs += BRANCH_COST;
9754 MonoBasicBlock **targets;
9755 MonoBasicBlock *default_bblock;
9756 MonoJumpInfoBBTable *table;
9757 int offset_reg = alloc_preg (cfg);
9758 int target_reg = alloc_preg (cfg);
9759 int table_reg = alloc_preg (cfg);
9760 int sum_reg = alloc_preg (cfg);
9761 gboolean use_op_switch;
9765 n = read32 (ip + 1);
9768 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9772 CHECK_OPSIZE (n * sizeof (guint32));
9773 target = ip + n * sizeof (guint32);
9775 GET_BBLOCK (cfg, default_bblock, target);
9776 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9778 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9779 for (i = 0; i < n; ++i) {
9780 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9781 targets [i] = tblock;
9782 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9786 if (sp != stack_start) {
9788 * Link the current bb with the targets as well, so handle_stack_args
9789 * will set their in_stack correctly.
9791 link_bblock (cfg, bblock, default_bblock);
9792 for (i = 0; i < n; ++i)
9793 link_bblock (cfg, bblock, targets [i]);
9795 handle_stack_args (cfg, stack_start, sp - stack_start);
9797 CHECK_UNVERIFIABLE (cfg);
9800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9801 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9804 for (i = 0; i < n; ++i)
9805 link_bblock (cfg, bblock, targets [i]);
9807 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9808 table->table = targets;
9809 table->table_size = n;
9811 use_op_switch = FALSE;
9813 /* ARM implements SWITCH statements differently */
9814 /* FIXME: Make it use the generic implementation */
9815 if (!cfg->compile_aot)
9816 use_op_switch = TRUE;
9819 if (COMPILE_LLVM (cfg))
9820 use_op_switch = TRUE;
9822 cfg->cbb->has_jump_table = 1;
9824 if (use_op_switch) {
9825 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9826 ins->sreg1 = src1->dreg;
9827 ins->inst_p0 = table;
9828 ins->inst_many_bb = targets;
9829 ins->klass = GUINT_TO_POINTER (n);
9830 MONO_ADD_INS (cfg->cbb, ins);
9832 if (sizeof (gpointer) == 8)
9833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9837 #if SIZEOF_REGISTER == 8
9838 /* The upper word might not be zero, and we add it to a 64 bit address later */
9839 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9842 if (cfg->compile_aot) {
9843 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9845 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9846 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9847 ins->inst_p0 = table;
9848 ins->dreg = table_reg;
9849 MONO_ADD_INS (cfg->cbb, ins);
9852 /* FIXME: Use load_memindex */
9853 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9855 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9857 start_new_bblock = 1;
9858 inline_costs += (BRANCH_COST * 2);
9878 dreg = alloc_freg (cfg);
9881 dreg = alloc_lreg (cfg);
9884 dreg = alloc_ireg_ref (cfg);
9887 dreg = alloc_preg (cfg);
9890 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9891 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9892 if (*ip == CEE_LDIND_R4)
9893 ins->type = cfg->r4_stack_type;
9894 ins->flags |= ins_flag;
9895 MONO_ADD_INS (bblock, ins);
9897 if (ins_flag & MONO_INST_VOLATILE) {
9898 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9899 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9915 if (ins_flag & MONO_INST_VOLATILE) {
9916 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9917 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9920 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9921 ins->flags |= ins_flag;
9924 MONO_ADD_INS (bblock, ins);
9926 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9927 emit_write_barrier (cfg, sp [0], sp [1]);
9936 MONO_INST_NEW (cfg, ins, (*ip));
9938 ins->sreg1 = sp [0]->dreg;
9939 ins->sreg2 = sp [1]->dreg;
9940 type_from_op (cfg, ins, sp [0], sp [1]);
9942 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9944 /* Use the immediate opcodes if possible */
9945 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9946 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9947 if (imm_opcode != -1) {
9948 ins->opcode = imm_opcode;
9949 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9952 NULLIFY_INS (sp [1]);
9956 MONO_ADD_INS ((cfg)->cbb, (ins));
9958 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9975 MONO_INST_NEW (cfg, ins, (*ip));
9977 ins->sreg1 = sp [0]->dreg;
9978 ins->sreg2 = sp [1]->dreg;
9979 type_from_op (cfg, ins, sp [0], sp [1]);
9981 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9982 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9984 /* FIXME: Pass opcode to is_inst_imm */
9986 /* Use the immediate opcodes if possible */
9987 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9990 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9991 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9992 /* Keep emulated opcodes which are optimized away later */
9993 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9994 imm_opcode = mono_op_to_op_imm (ins->opcode);
9997 if (imm_opcode != -1) {
9998 ins->opcode = imm_opcode;
9999 if (sp [1]->opcode == OP_I8CONST) {
10000 #if SIZEOF_REGISTER == 8
10001 ins->inst_imm = sp [1]->inst_l;
10003 ins->inst_ls_word = sp [1]->inst_ls_word;
10004 ins->inst_ms_word = sp [1]->inst_ms_word;
10008 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10011 /* Might be followed by an instruction added by add_widen_op */
10012 if (sp [1]->next == NULL)
10013 NULLIFY_INS (sp [1]);
10016 MONO_ADD_INS ((cfg)->cbb, (ins));
10018 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10031 case CEE_CONV_OVF_I8:
10032 case CEE_CONV_OVF_U8:
10033 case CEE_CONV_R_UN:
10036 /* Special case this earlier so we have long constants in the IR */
10037 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10038 int data = sp [-1]->inst_c0;
10039 sp [-1]->opcode = OP_I8CONST;
10040 sp [-1]->type = STACK_I8;
10041 #if SIZEOF_REGISTER == 8
10042 if ((*ip) == CEE_CONV_U8)
10043 sp [-1]->inst_c0 = (guint32)data;
10045 sp [-1]->inst_c0 = data;
10047 sp [-1]->inst_ls_word = data;
10048 if ((*ip) == CEE_CONV_U8)
10049 sp [-1]->inst_ms_word = 0;
10051 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10053 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10060 case CEE_CONV_OVF_I4:
10061 case CEE_CONV_OVF_I1:
10062 case CEE_CONV_OVF_I2:
10063 case CEE_CONV_OVF_I:
10064 case CEE_CONV_OVF_U:
10067 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10068 ADD_UNOP (CEE_CONV_OVF_I8);
10075 case CEE_CONV_OVF_U1:
10076 case CEE_CONV_OVF_U2:
10077 case CEE_CONV_OVF_U4:
10080 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10081 ADD_UNOP (CEE_CONV_OVF_U8);
10088 case CEE_CONV_OVF_I1_UN:
10089 case CEE_CONV_OVF_I2_UN:
10090 case CEE_CONV_OVF_I4_UN:
10091 case CEE_CONV_OVF_I8_UN:
10092 case CEE_CONV_OVF_U1_UN:
10093 case CEE_CONV_OVF_U2_UN:
10094 case CEE_CONV_OVF_U4_UN:
10095 case CEE_CONV_OVF_U8_UN:
10096 case CEE_CONV_OVF_I_UN:
10097 case CEE_CONV_OVF_U_UN:
10104 CHECK_CFG_EXCEPTION;
10108 case CEE_ADD_OVF_UN:
10110 case CEE_MUL_OVF_UN:
10112 case CEE_SUB_OVF_UN:
10118 GSHAREDVT_FAILURE (*ip);
10121 token = read32 (ip + 1);
10122 klass = mini_get_class (method, token, generic_context);
10123 CHECK_TYPELOAD (klass);
10125 if (generic_class_is_reference_type (cfg, klass)) {
10126 MonoInst *store, *load;
10127 int dreg = alloc_ireg_ref (cfg);
10129 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10130 load->flags |= ins_flag;
10131 MONO_ADD_INS (cfg->cbb, load);
10133 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10134 store->flags |= ins_flag;
10135 MONO_ADD_INS (cfg->cbb, store);
10137 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10138 emit_write_barrier (cfg, sp [0], sp [1]);
10140 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10146 int loc_index = -1;
10152 token = read32 (ip + 1);
10153 klass = mini_get_class (method, token, generic_context);
10154 CHECK_TYPELOAD (klass);
10156 /* Optimize the common ldobj+stloc combination */
10159 loc_index = ip [6];
10166 loc_index = ip [5] - CEE_STLOC_0;
10173 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10174 CHECK_LOCAL (loc_index);
10176 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10177 ins->dreg = cfg->locals [loc_index]->dreg;
10178 ins->flags |= ins_flag;
10181 if (ins_flag & MONO_INST_VOLATILE) {
10182 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10183 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10189 /* Optimize the ldobj+stobj combination */
10190 /* The reference case ends up being a load+store anyway */
10191 /* Skip this if the operation is volatile. */
10192 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10197 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10204 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10205 ins->flags |= ins_flag;
10208 if (ins_flag & MONO_INST_VOLATILE) {
10209 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10210 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10219 CHECK_STACK_OVF (1);
10221 n = read32 (ip + 1);
10223 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10224 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10225 ins->type = STACK_OBJ;
10228 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10229 MonoInst *iargs [1];
10230 char *str = mono_method_get_wrapper_data (method, n);
10232 if (cfg->compile_aot)
10233 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10235 EMIT_NEW_PCONST (cfg, iargs [0], str);
10236 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10238 if (cfg->opt & MONO_OPT_SHARED) {
10239 MonoInst *iargs [3];
10241 if (cfg->compile_aot) {
10242 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10244 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10245 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10246 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10247 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10248 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10250 if (bblock->out_of_line) {
10251 MonoInst *iargs [2];
10253 if (image == mono_defaults.corlib) {
10255 * Avoid relocations in AOT and save some space by using a
10256 * version of helper_ldstr specialized to mscorlib.
10258 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10259 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10261 /* Avoid creating the string object */
10262 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10263 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10264 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10268 if (cfg->compile_aot) {
10269 NEW_LDSTRCONST (cfg, ins, image, n);
10271 MONO_ADD_INS (bblock, ins);
10274 NEW_PCONST (cfg, ins, NULL);
10275 ins->type = STACK_OBJ;
10276 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10278 OUT_OF_MEMORY_FAILURE;
10281 MONO_ADD_INS (bblock, ins);
10290 MonoInst *iargs [2];
10291 MonoMethodSignature *fsig;
10294 MonoInst *vtable_arg = NULL;
10297 token = read32 (ip + 1);
10298 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10299 if (!cmethod || mono_loader_get_last_error ())
10301 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10304 mono_save_token_info (cfg, image, token, cmethod);
10306 if (!mono_class_init (cmethod->klass))
10307 TYPE_LOAD_ERROR (cmethod->klass);
10309 context_used = mini_method_check_context_used (cfg, cmethod);
10311 if (mono_security_cas_enabled ()) {
10312 if (check_linkdemand (cfg, method, cmethod))
10313 INLINE_FAILURE ("linkdemand");
10314 CHECK_CFG_EXCEPTION;
10315 } else if (mono_security_core_clr_enabled ()) {
10316 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10319 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10320 emit_generic_class_init (cfg, cmethod->klass);
10321 CHECK_TYPELOAD (cmethod->klass);
10325 if (cfg->gsharedvt) {
10326 if (mini_is_gsharedvt_variable_signature (sig))
10327 GSHAREDVT_FAILURE (*ip);
10331 n = fsig->param_count;
10335 * Generate smaller code for the common newobj <exception> instruction in
10336 * argument checking code.
10338 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10339 is_exception_class (cmethod->klass) && n <= 2 &&
10340 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10341 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10342 MonoInst *iargs [3];
10346 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10349 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10352 iargs [1] = sp [0];
10353 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10356 iargs [1] = sp [0];
10357 iargs [2] = sp [1];
10358 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10361 g_assert_not_reached ();
10369 /* move the args to allow room for 'this' in the first position */
10375 /* check_call_signature () requires sp[0] to be set */
10376 this_ins.type = STACK_OBJ;
10377 sp [0] = &this_ins;
10378 if (check_call_signature (cfg, fsig, sp))
10383 if (mini_class_is_system_array (cmethod->klass)) {
10384 *sp = emit_get_rgctx_method (cfg, context_used,
10385 cmethod, MONO_RGCTX_INFO_METHOD);
10387 /* Avoid varargs in the common case */
10388 if (fsig->param_count == 1)
10389 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10390 else if (fsig->param_count == 2)
10391 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10392 else if (fsig->param_count == 3)
10393 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10394 else if (fsig->param_count == 4)
10395 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10397 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10398 } else if (cmethod->string_ctor) {
10399 g_assert (!context_used);
10400 g_assert (!vtable_arg);
10401 /* we simply pass a null pointer */
10402 EMIT_NEW_PCONST (cfg, *sp, NULL);
10403 /* now call the string ctor */
10404 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10406 if (cmethod->klass->valuetype) {
10407 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10408 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10409 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10414 * The code generated by mini_emit_virtual_call () expects
10415 * iargs [0] to be a boxed instance, but luckily the vcall
10416 * will be transformed into a normal call there.
10418 } else if (context_used) {
10419 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10422 MonoVTable *vtable = NULL;
10424 if (!cfg->compile_aot)
10425 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10426 CHECK_TYPELOAD (cmethod->klass);
10429 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10430 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10431 * As a workaround, we call class cctors before allocating objects.
10433 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10434 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10435 if (cfg->verbose_level > 2)
10436 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10437 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10440 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10443 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10446 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10448 /* Now call the actual ctor */
10449 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10450 CHECK_CFG_EXCEPTION;
10453 if (alloc == NULL) {
10455 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10456 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10464 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10465 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10468 case CEE_CASTCLASS:
10472 token = read32 (ip + 1);
10473 klass = mini_get_class (method, token, generic_context);
10474 CHECK_TYPELOAD (klass);
10475 if (sp [0]->type != STACK_OBJ)
10478 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10479 CHECK_CFG_EXCEPTION;
10488 token = read32 (ip + 1);
10489 klass = mini_get_class (method, token, generic_context);
10490 CHECK_TYPELOAD (klass);
10491 if (sp [0]->type != STACK_OBJ)
10494 context_used = mini_class_check_context_used (cfg, klass);
10496 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10497 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10498 MonoInst *args [3];
10504 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10507 if (cfg->compile_aot)
10508 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10510 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10512 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10515 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10516 MonoMethod *mono_isinst;
10517 MonoInst *iargs [1];
10520 mono_isinst = mono_marshal_get_isinst (klass);
10521 iargs [0] = sp [0];
10523 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10524 iargs, ip, cfg->real_offset, TRUE, &bblock);
10525 CHECK_CFG_EXCEPTION;
10526 g_assert (costs > 0);
10529 cfg->real_offset += 5;
10533 inline_costs += costs;
10536 ins = handle_isinst (cfg, klass, *sp, context_used);
10537 CHECK_CFG_EXCEPTION;
10544 case CEE_UNBOX_ANY: {
10545 MonoInst *res, *addr;
10550 token = read32 (ip + 1);
10551 klass = mini_get_class (method, token, generic_context);
10552 CHECK_TYPELOAD (klass);
10554 mono_save_token_info (cfg, image, token, klass);
10556 context_used = mini_class_check_context_used (cfg, klass);
10558 if (mini_is_gsharedvt_klass (cfg, klass)) {
10559 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10561 } else if (generic_class_is_reference_type (cfg, klass)) {
10562 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10563 CHECK_CFG_EXCEPTION;
10564 } else if (mono_class_is_nullable (klass)) {
10565 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10567 addr = handle_unbox (cfg, klass, sp, context_used);
10569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10580 MonoClass *enum_class;
10581 MonoMethod *has_flag;
10587 token = read32 (ip + 1);
10588 klass = mini_get_class (method, token, generic_context);
10589 CHECK_TYPELOAD (klass);
10591 mono_save_token_info (cfg, image, token, klass);
10593 context_used = mini_class_check_context_used (cfg, klass);
10595 if (generic_class_is_reference_type (cfg, klass)) {
10601 if (klass == mono_defaults.void_class)
10603 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10605 /* frequent check in generic code: box (struct), brtrue */
10610 * <push int/long ptr>
10613 * constrained. MyFlags
10614 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10616 * If we find this sequence and the operand types on box and constrained
10617 * are equal, we can emit a specialized instruction sequence instead of
10618 * the very slow HasFlag () call.
10620 if ((cfg->opt & MONO_OPT_INTRINS) &&
10621 /* Cheap checks first. */
10622 ip + 5 + 6 + 5 < end &&
10623 ip [5] == CEE_PREFIX1 &&
10624 ip [6] == CEE_CONSTRAINED_ &&
10625 ip [11] == CEE_CALLVIRT &&
10626 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10627 mono_class_is_enum (klass) &&
10628 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10629 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10630 has_flag->klass == mono_defaults.enum_class &&
10631 !strcmp (has_flag->name, "HasFlag") &&
10632 has_flag->signature->hasthis &&
10633 has_flag->signature->param_count == 1) {
10634 CHECK_TYPELOAD (enum_class);
10636 if (enum_class == klass) {
10637 MonoInst *enum_this, *enum_flag;
10642 enum_this = sp [0];
10643 enum_flag = sp [1];
10645 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10650 // FIXME: LLVM can't handle the inconsistent bb linking
10651 if (!mono_class_is_nullable (klass) &&
10652 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10653 (ip [5] == CEE_BRTRUE ||
10654 ip [5] == CEE_BRTRUE_S ||
10655 ip [5] == CEE_BRFALSE ||
10656 ip [5] == CEE_BRFALSE_S)) {
10657 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10659 MonoBasicBlock *true_bb, *false_bb;
10663 if (cfg->verbose_level > 3) {
10664 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10665 printf ("<box+brtrue opt>\n");
10670 case CEE_BRFALSE_S:
10673 target = ip + 1 + (signed char)(*ip);
10680 target = ip + 4 + (gint)(read32 (ip));
10684 g_assert_not_reached ();
10688 * We need to link both bblocks, since it is needed for handling stack
10689 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10690 * Branching to only one of them would lead to inconsistencies, so
10691 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10693 GET_BBLOCK (cfg, true_bb, target);
10694 GET_BBLOCK (cfg, false_bb, ip);
10696 mono_link_bblock (cfg, cfg->cbb, true_bb);
10697 mono_link_bblock (cfg, cfg->cbb, false_bb);
10699 if (sp != stack_start) {
10700 handle_stack_args (cfg, stack_start, sp - stack_start);
10702 CHECK_UNVERIFIABLE (cfg);
10705 if (COMPILE_LLVM (cfg)) {
10706 dreg = alloc_ireg (cfg);
10707 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10708 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10710 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10712 /* The JIT can't eliminate the iconst+compare */
10713 MONO_INST_NEW (cfg, ins, OP_BR);
10714 ins->inst_target_bb = is_true ? true_bb : false_bb;
10715 MONO_ADD_INS (cfg->cbb, ins);
10718 start_new_bblock = 1;
10722 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10724 CHECK_CFG_EXCEPTION;
10733 token = read32 (ip + 1);
10734 klass = mini_get_class (method, token, generic_context);
10735 CHECK_TYPELOAD (klass);
10737 mono_save_token_info (cfg, image, token, klass);
10739 context_used = mini_class_check_context_used (cfg, klass);
10741 if (mono_class_is_nullable (klass)) {
10744 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10745 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10749 ins = handle_unbox (cfg, klass, sp, context_used);
10762 MonoClassField *field;
10763 #ifndef DISABLE_REMOTING
10767 gboolean is_instance;
10769 gpointer addr = NULL;
10770 gboolean is_special_static;
10772 MonoInst *store_val = NULL;
10773 MonoInst *thread_ins;
10776 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10778 if (op == CEE_STFLD) {
10781 store_val = sp [1];
10786 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10788 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10791 if (op == CEE_STSFLD) {
10794 store_val = sp [0];
10799 token = read32 (ip + 1);
10800 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10801 field = mono_method_get_wrapper_data (method, token);
10802 klass = field->parent;
10805 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10808 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10809 FIELD_ACCESS_FAILURE (method, field);
10810 mono_class_init (klass);
10812 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10815 /* if the class is Critical then transparent code cannot access it's fields */
10816 if (!is_instance && mono_security_core_clr_enabled ())
10817 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10819 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10820 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10821 if (mono_security_core_clr_enabled ())
10822 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10826 * LDFLD etc. is usable on static fields as well, so convert those cases to
10829 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10841 g_assert_not_reached ();
10843 is_instance = FALSE;
10846 context_used = mini_class_check_context_used (cfg, klass);
10848 /* INSTANCE CASE */
10850 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10851 if (op == CEE_STFLD) {
10852 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10854 #ifndef DISABLE_REMOTING
10855 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10856 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10857 MonoInst *iargs [5];
10859 GSHAREDVT_FAILURE (op);
10861 iargs [0] = sp [0];
10862 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10863 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10864 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10866 iargs [4] = sp [1];
10868 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10869 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10870 iargs, ip, cfg->real_offset, TRUE, &bblock);
10871 CHECK_CFG_EXCEPTION;
10872 g_assert (costs > 0);
10874 cfg->real_offset += 5;
10876 inline_costs += costs;
10878 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10885 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10887 if (mini_is_gsharedvt_klass (cfg, klass)) {
10888 MonoInst *offset_ins;
10890 context_used = mini_class_check_context_used (cfg, klass);
10892 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10893 dreg = alloc_ireg_mp (cfg);
10894 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10895 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10896 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10898 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10900 if (sp [0]->opcode != OP_LDADDR)
10901 store->flags |= MONO_INST_FAULT;
10903 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10904 /* insert call to write barrier */
10908 dreg = alloc_ireg_mp (cfg);
10909 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10910 emit_write_barrier (cfg, ptr, sp [1]);
10913 store->flags |= ins_flag;
10920 #ifndef DISABLE_REMOTING
10921 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10922 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10923 MonoInst *iargs [4];
10925 GSHAREDVT_FAILURE (op);
10927 iargs [0] = sp [0];
10928 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10929 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10930 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10931 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10932 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10933 iargs, ip, cfg->real_offset, TRUE, &bblock);
10934 CHECK_CFG_EXCEPTION;
10935 g_assert (costs > 0);
10937 cfg->real_offset += 5;
10941 inline_costs += costs;
10943 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10949 if (sp [0]->type == STACK_VTYPE) {
10952 /* Have to compute the address of the variable */
10954 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10956 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10958 g_assert (var->klass == klass);
10960 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10964 if (op == CEE_LDFLDA) {
10965 if (is_magic_tls_access (field)) {
10966 GSHAREDVT_FAILURE (*ip);
10968 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10970 if (sp [0]->type == STACK_OBJ) {
10971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10972 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10975 dreg = alloc_ireg_mp (cfg);
10977 if (mini_is_gsharedvt_klass (cfg, klass)) {
10978 MonoInst *offset_ins;
10980 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10981 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10983 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10985 ins->klass = mono_class_from_mono_type (field->type);
10986 ins->type = STACK_MP;
10992 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10994 if (mini_is_gsharedvt_klass (cfg, klass)) {
10995 MonoInst *offset_ins;
10997 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10998 dreg = alloc_ireg_mp (cfg);
10999 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11000 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11004 load->flags |= ins_flag;
11005 if (sp [0]->opcode != OP_LDADDR)
11006 load->flags |= MONO_INST_FAULT;
11020 * We can only support shared generic static
11021 * field access on architectures where the
11022 * trampoline code has been extended to handle
11023 * the generic class init.
11025 #ifndef MONO_ARCH_VTABLE_REG
11026 GENERIC_SHARING_FAILURE (op);
11029 context_used = mini_class_check_context_used (cfg, klass);
11031 ftype = mono_field_get_type (field);
11033 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11036 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11037 * to be called here.
11039 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11040 mono_class_vtable (cfg->domain, klass);
11041 CHECK_TYPELOAD (klass);
11043 mono_domain_lock (cfg->domain);
11044 if (cfg->domain->special_static_fields)
11045 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11046 mono_domain_unlock (cfg->domain);
11048 is_special_static = mono_class_field_is_special_static (field);
11050 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11051 thread_ins = mono_get_thread_intrinsic (cfg);
11055 /* Generate IR to compute the field address */
11056 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11058 * Fast access to TLS data
11059 * Inline version of get_thread_static_data () in
11063 int idx, static_data_reg, array_reg, dreg;
11065 GSHAREDVT_FAILURE (op);
11067 // offset &= 0x7fffffff;
11068 // idx = (offset >> 24) - 1;
11069 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11070 MONO_ADD_INS (cfg->cbb, thread_ins);
11071 static_data_reg = alloc_ireg (cfg);
11072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11074 if (cfg->compile_aot) {
11075 int offset_reg, offset2_reg, idx_reg;
11077 /* For TLS variables, this will return the TLS offset */
11078 EMIT_NEW_SFLDACONST (cfg, ins, field);
11079 offset_reg = ins->dreg;
11080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11081 idx_reg = alloc_ireg (cfg);
11082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11085 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11086 array_reg = alloc_ireg (cfg);
11087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11088 offset2_reg = alloc_ireg (cfg);
11089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11090 dreg = alloc_ireg (cfg);
11091 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11093 offset = (gsize)addr & 0x7fffffff;
11094 idx = (offset >> 24) - 1;
11096 array_reg = alloc_ireg (cfg);
11097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11098 dreg = alloc_ireg (cfg);
11099 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11101 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11102 (cfg->compile_aot && is_special_static) ||
11103 (context_used && is_special_static)) {
11104 MonoInst *iargs [2];
11106 g_assert (field->parent);
11107 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11108 if (context_used) {
11109 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11110 field, MONO_RGCTX_INFO_CLASS_FIELD);
11112 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11114 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11115 } else if (context_used) {
11116 MonoInst *static_data;
11119 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11120 method->klass->name_space, method->klass->name, method->name,
11121 depth, field->offset);
11124 if (mono_class_needs_cctor_run (klass, method))
11125 emit_generic_class_init (cfg, klass);
11128 * The pointer we're computing here is
11130 * super_info.static_data + field->offset
11132 static_data = emit_get_rgctx_klass (cfg, context_used,
11133 klass, MONO_RGCTX_INFO_STATIC_DATA);
11135 if (mini_is_gsharedvt_klass (cfg, klass)) {
11136 MonoInst *offset_ins;
11138 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11139 dreg = alloc_ireg_mp (cfg);
11140 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11141 } else if (field->offset == 0) {
11144 int addr_reg = mono_alloc_preg (cfg);
11145 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11147 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11148 MonoInst *iargs [2];
11150 g_assert (field->parent);
11151 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11152 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11153 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11155 MonoVTable *vtable = NULL;
11157 if (!cfg->compile_aot)
11158 vtable = mono_class_vtable (cfg->domain, klass);
11159 CHECK_TYPELOAD (klass);
11162 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11163 if (!(g_slist_find (class_inits, klass))) {
11164 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11165 if (cfg->verbose_level > 2)
11166 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11167 class_inits = g_slist_prepend (class_inits, klass);
11170 if (cfg->run_cctors) {
11172 /* This makes so that inline cannot trigger */
11173 /* .cctors: too many apps depend on them */
11174 /* running with a specific order... */
11176 if (! vtable->initialized)
11177 INLINE_FAILURE ("class init");
11178 ex = mono_runtime_class_init_full (vtable, FALSE);
11180 set_exception_object (cfg, ex);
11181 goto exception_exit;
11185 if (cfg->compile_aot)
11186 EMIT_NEW_SFLDACONST (cfg, ins, field);
11189 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11191 EMIT_NEW_PCONST (cfg, ins, addr);
11194 MonoInst *iargs [1];
11195 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11196 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11200 /* Generate IR to do the actual load/store operation */
11202 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11203 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11204 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11207 if (op == CEE_LDSFLDA) {
11208 ins->klass = mono_class_from_mono_type (ftype);
11209 ins->type = STACK_PTR;
11211 } else if (op == CEE_STSFLD) {
11214 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11215 store->flags |= ins_flag;
11217 gboolean is_const = FALSE;
11218 MonoVTable *vtable = NULL;
11219 gpointer addr = NULL;
11221 if (!context_used) {
11222 vtable = mono_class_vtable (cfg->domain, klass);
11223 CHECK_TYPELOAD (klass);
11225 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11226 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11227 int ro_type = ftype->type;
11229 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11230 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11231 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11234 GSHAREDVT_FAILURE (op);
11236 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11239 case MONO_TYPE_BOOLEAN:
11241 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11245 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11248 case MONO_TYPE_CHAR:
11250 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11254 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11259 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11263 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11268 case MONO_TYPE_PTR:
11269 case MONO_TYPE_FNPTR:
11270 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11271 type_to_eval_stack_type ((cfg), field->type, *sp);
11274 case MONO_TYPE_STRING:
11275 case MONO_TYPE_OBJECT:
11276 case MONO_TYPE_CLASS:
11277 case MONO_TYPE_SZARRAY:
11278 case MONO_TYPE_ARRAY:
11279 if (!mono_gc_is_moving ()) {
11280 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11281 type_to_eval_stack_type ((cfg), field->type, *sp);
11289 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11294 case MONO_TYPE_VALUETYPE:
11304 CHECK_STACK_OVF (1);
11306 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11307 load->flags |= ins_flag;
11313 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11314 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11315 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11326 token = read32 (ip + 1);
11327 klass = mini_get_class (method, token, generic_context);
11328 CHECK_TYPELOAD (klass);
11329 if (ins_flag & MONO_INST_VOLATILE) {
11330 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11331 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11333 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11335 ins->flags |= ins_flag;
11336 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11337 generic_class_is_reference_type (cfg, klass)) {
11338 /* insert call to write barrier */
11339 emit_write_barrier (cfg, sp [0], sp [1]);
11351 const char *data_ptr;
11353 guint32 field_token;
11359 token = read32 (ip + 1);
11361 klass = mini_get_class (method, token, generic_context);
11362 CHECK_TYPELOAD (klass);
11364 context_used = mini_class_check_context_used (cfg, klass);
11366 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11367 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11368 ins->sreg1 = sp [0]->dreg;
11369 ins->type = STACK_I4;
11370 ins->dreg = alloc_ireg (cfg);
11371 MONO_ADD_INS (cfg->cbb, ins);
11372 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11375 if (context_used) {
11376 MonoInst *args [3];
11377 MonoClass *array_class = mono_array_class_get (klass, 1);
11378 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11380 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11383 args [0] = emit_get_rgctx_klass (cfg, context_used,
11384 array_class, MONO_RGCTX_INFO_VTABLE);
11389 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11391 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11393 if (cfg->opt & MONO_OPT_SHARED) {
11394 /* Decompose now to avoid problems with references to the domainvar */
11395 MonoInst *iargs [3];
11397 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11398 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11399 iargs [2] = sp [0];
11401 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11403 /* Decompose later since it is needed by abcrem */
11404 MonoClass *array_type = mono_array_class_get (klass, 1);
11405 mono_class_vtable (cfg->domain, array_type);
11406 CHECK_TYPELOAD (array_type);
11408 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11409 ins->dreg = alloc_ireg_ref (cfg);
11410 ins->sreg1 = sp [0]->dreg;
11411 ins->inst_newa_class = klass;
11412 ins->type = STACK_OBJ;
11413 ins->klass = array_type;
11414 MONO_ADD_INS (cfg->cbb, ins);
11415 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11416 cfg->cbb->has_array_access = TRUE;
11418 /* Needed so mono_emit_load_get_addr () gets called */
11419 mono_get_got_var (cfg);
11429 * we inline/optimize the initialization sequence if possible.
11430 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11431 * for small sizes open code the memcpy
11432 * ensure the rva field is big enough
11434 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11435 MonoMethod *memcpy_method = get_memcpy_method ();
11436 MonoInst *iargs [3];
11437 int add_reg = alloc_ireg_mp (cfg);
11439 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11440 if (cfg->compile_aot) {
11441 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11443 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11445 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11446 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11455 if (sp [0]->type != STACK_OBJ)
11458 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11459 ins->dreg = alloc_preg (cfg);
11460 ins->sreg1 = sp [0]->dreg;
11461 ins->type = STACK_I4;
11462 /* This flag will be inherited by the decomposition */
11463 ins->flags |= MONO_INST_FAULT;
11464 MONO_ADD_INS (cfg->cbb, ins);
11465 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11466 cfg->cbb->has_array_access = TRUE;
11474 if (sp [0]->type != STACK_OBJ)
11477 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11479 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11480 CHECK_TYPELOAD (klass);
11481 /* we need to make sure that this array is exactly the type it needs
11482 * to be for correctness. the wrappers are lax with their usage
11483 * so we need to ignore them here
11485 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11486 MonoClass *array_class = mono_array_class_get (klass, 1);
11487 mini_emit_check_array_type (cfg, sp [0], array_class);
11488 CHECK_TYPELOAD (array_class);
11492 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11497 case CEE_LDELEM_I1:
11498 case CEE_LDELEM_U1:
11499 case CEE_LDELEM_I2:
11500 case CEE_LDELEM_U2:
11501 case CEE_LDELEM_I4:
11502 case CEE_LDELEM_U4:
11503 case CEE_LDELEM_I8:
11505 case CEE_LDELEM_R4:
11506 case CEE_LDELEM_R8:
11507 case CEE_LDELEM_REF: {
11513 if (*ip == CEE_LDELEM) {
11515 token = read32 (ip + 1);
11516 klass = mini_get_class (method, token, generic_context);
11517 CHECK_TYPELOAD (klass);
11518 mono_class_init (klass);
11521 klass = array_access_to_klass (*ip);
11523 if (sp [0]->type != STACK_OBJ)
11526 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11528 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11529 // FIXME-VT: OP_ICONST optimization
11530 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11531 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11532 ins->opcode = OP_LOADV_MEMBASE;
11533 } else if (sp [1]->opcode == OP_ICONST) {
11534 int array_reg = sp [0]->dreg;
11535 int index_reg = sp [1]->dreg;
11536 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11538 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11539 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11541 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11542 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11545 if (*ip == CEE_LDELEM)
11552 case CEE_STELEM_I1:
11553 case CEE_STELEM_I2:
11554 case CEE_STELEM_I4:
11555 case CEE_STELEM_I8:
11556 case CEE_STELEM_R4:
11557 case CEE_STELEM_R8:
11558 case CEE_STELEM_REF:
11563 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11565 if (*ip == CEE_STELEM) {
11567 token = read32 (ip + 1);
11568 klass = mini_get_class (method, token, generic_context);
11569 CHECK_TYPELOAD (klass);
11570 mono_class_init (klass);
11573 klass = array_access_to_klass (*ip);
11575 if (sp [0]->type != STACK_OBJ)
11578 emit_array_store (cfg, klass, sp, TRUE);
11580 if (*ip == CEE_STELEM)
11587 case CEE_CKFINITE: {
11591 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11592 ins->sreg1 = sp [0]->dreg;
11593 ins->dreg = alloc_freg (cfg);
11594 ins->type = STACK_R8;
11595 MONO_ADD_INS (bblock, ins);
11597 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11602 case CEE_REFANYVAL: {
11603 MonoInst *src_var, *src;
11605 int klass_reg = alloc_preg (cfg);
11606 int dreg = alloc_preg (cfg);
11608 GSHAREDVT_FAILURE (*ip);
11611 MONO_INST_NEW (cfg, ins, *ip);
11614 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11615 CHECK_TYPELOAD (klass);
11617 context_used = mini_class_check_context_used (cfg, klass);
11620 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11622 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11623 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11626 if (context_used) {
11627 MonoInst *klass_ins;
11629 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11630 klass, MONO_RGCTX_INFO_KLASS);
11633 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11634 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11636 mini_emit_class_check (cfg, klass_reg, klass);
11638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11639 ins->type = STACK_MP;
11644 case CEE_MKREFANY: {
11645 MonoInst *loc, *addr;
11647 GSHAREDVT_FAILURE (*ip);
11650 MONO_INST_NEW (cfg, ins, *ip);
11653 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11654 CHECK_TYPELOAD (klass);
11656 context_used = mini_class_check_context_used (cfg, klass);
11658 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11659 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11661 if (context_used) {
11662 MonoInst *const_ins;
11663 int type_reg = alloc_preg (cfg);
11665 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11666 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11669 } else if (cfg->compile_aot) {
11670 int const_reg = alloc_preg (cfg);
11671 int type_reg = alloc_preg (cfg);
11673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11678 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11679 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11683 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11684 ins->type = STACK_VTYPE;
11685 ins->klass = mono_defaults.typed_reference_class;
11690 case CEE_LDTOKEN: {
11692 MonoClass *handle_class;
11694 CHECK_STACK_OVF (1);
11697 n = read32 (ip + 1);
11699 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11700 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11701 handle = mono_method_get_wrapper_data (method, n);
11702 handle_class = mono_method_get_wrapper_data (method, n + 1);
11703 if (handle_class == mono_defaults.typehandle_class)
11704 handle = &((MonoClass*)handle)->byval_arg;
11707 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11712 mono_class_init (handle_class);
11713 if (cfg->generic_sharing_context) {
11714 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11715 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11716 /* This case handles ldtoken
11717 of an open type, like for
11720 } else if (handle_class == mono_defaults.typehandle_class) {
11721 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11722 } else if (handle_class == mono_defaults.fieldhandle_class)
11723 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11724 else if (handle_class == mono_defaults.methodhandle_class)
11725 context_used = mini_method_check_context_used (cfg, handle);
11727 g_assert_not_reached ();
11730 if ((cfg->opt & MONO_OPT_SHARED) &&
11731 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11732 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11733 MonoInst *addr, *vtvar, *iargs [3];
11734 int method_context_used;
11736 method_context_used = mini_method_check_context_used (cfg, method);
11738 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11740 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11741 EMIT_NEW_ICONST (cfg, iargs [1], n);
11742 if (method_context_used) {
11743 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11744 method, MONO_RGCTX_INFO_METHOD);
11745 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11747 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11748 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11750 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11754 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11756 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11757 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11758 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11759 (cmethod->klass == mono_defaults.systemtype_class) &&
11760 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11761 MonoClass *tclass = mono_class_from_mono_type (handle);
11763 mono_class_init (tclass);
11764 if (context_used) {
11765 ins = emit_get_rgctx_klass (cfg, context_used,
11766 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11767 } else if (cfg->compile_aot) {
11768 if (method->wrapper_type) {
11769 mono_error_init (&error); //got to do it since there are multiple conditionals below
11770 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11771 /* Special case for static synchronized wrappers */
11772 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11774 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11775 /* FIXME: n is not a normal token */
11777 EMIT_NEW_PCONST (cfg, ins, NULL);
11780 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11783 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11785 ins->type = STACK_OBJ;
11786 ins->klass = cmethod->klass;
11789 MonoInst *addr, *vtvar;
11791 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11793 if (context_used) {
11794 if (handle_class == mono_defaults.typehandle_class) {
11795 ins = emit_get_rgctx_klass (cfg, context_used,
11796 mono_class_from_mono_type (handle),
11797 MONO_RGCTX_INFO_TYPE);
11798 } else if (handle_class == mono_defaults.methodhandle_class) {
11799 ins = emit_get_rgctx_method (cfg, context_used,
11800 handle, MONO_RGCTX_INFO_METHOD);
11801 } else if (handle_class == mono_defaults.fieldhandle_class) {
11802 ins = emit_get_rgctx_field (cfg, context_used,
11803 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11805 g_assert_not_reached ();
11807 } else if (cfg->compile_aot) {
11808 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11810 EMIT_NEW_PCONST (cfg, ins, handle);
11812 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11814 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11824 MONO_INST_NEW (cfg, ins, OP_THROW);
11826 ins->sreg1 = sp [0]->dreg;
11828 bblock->out_of_line = TRUE;
11829 MONO_ADD_INS (bblock, ins);
11830 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11831 MONO_ADD_INS (bblock, ins);
11834 link_bblock (cfg, bblock, end_bblock);
11835 start_new_bblock = 1;
11837 case CEE_ENDFINALLY:
11838 /* mono_save_seq_point_info () depends on this */
11839 if (sp != stack_start)
11840 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11841 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11842 MONO_ADD_INS (bblock, ins);
11844 start_new_bblock = 1;
11847 * Control will leave the method so empty the stack, otherwise
11848 * the next basic block will start with a nonempty stack.
11850 while (sp != stack_start) {
11855 case CEE_LEAVE_S: {
11858 if (*ip == CEE_LEAVE) {
11860 target = ip + 5 + (gint32)read32(ip + 1);
11863 target = ip + 2 + (signed char)(ip [1]);
11866 /* empty the stack */
11867 while (sp != stack_start) {
11872 * If this leave statement is in a catch block, check for a
11873 * pending exception, and rethrow it if necessary.
11874 * We avoid doing this in runtime invoke wrappers, since those are called
11875 * by native code which excepts the wrapper to catch all exceptions.
11877 for (i = 0; i < header->num_clauses; ++i) {
11878 MonoExceptionClause *clause = &header->clauses [i];
11881 * Use <= in the final comparison to handle clauses with multiple
11882 * leave statements, like in bug #78024.
11883 * The ordering of the exception clauses guarantees that we find the
11884 * innermost clause.
11886 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11888 MonoBasicBlock *dont_throw;
11893 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11896 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11898 NEW_BBLOCK (cfg, dont_throw);
11901 * Currently, we always rethrow the abort exception, despite the
11902 * fact that this is not correct. See thread6.cs for an example.
11903 * But propagating the abort exception is more important than
11904 * getting the sematics right.
11906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11908 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11910 MONO_START_BB (cfg, dont_throw);
11915 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11917 MonoExceptionClause *clause;
11919 for (tmp = handlers; tmp; tmp = tmp->next) {
11920 clause = tmp->data;
11921 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11923 link_bblock (cfg, bblock, tblock);
11924 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11925 ins->inst_target_bb = tblock;
11926 ins->inst_eh_block = clause;
11927 MONO_ADD_INS (bblock, ins);
11928 bblock->has_call_handler = 1;
11929 if (COMPILE_LLVM (cfg)) {
11930 MonoBasicBlock *target_bb;
11933 * Link the finally bblock with the target, since it will
11934 * conceptually branch there.
11935 * FIXME: Have to link the bblock containing the endfinally.
11937 GET_BBLOCK (cfg, target_bb, target);
11938 link_bblock (cfg, tblock, target_bb);
11941 g_list_free (handlers);
11944 MONO_INST_NEW (cfg, ins, OP_BR);
11945 MONO_ADD_INS (bblock, ins);
11946 GET_BBLOCK (cfg, tblock, target);
11947 link_bblock (cfg, bblock, tblock);
11948 ins->inst_target_bb = tblock;
11949 start_new_bblock = 1;
11951 if (*ip == CEE_LEAVE)
11960 * Mono specific opcodes
11962 case MONO_CUSTOM_PREFIX: {
11964 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11968 case CEE_MONO_ICALL: {
11970 MonoJitICallInfo *info;
11972 token = read32 (ip + 2);
11973 func = mono_method_get_wrapper_data (method, token);
11974 info = mono_find_jit_icall_by_addr (func);
11976 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11979 CHECK_STACK (info->sig->param_count);
11980 sp -= info->sig->param_count;
11982 ins = mono_emit_jit_icall (cfg, info->func, sp);
11983 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11987 inline_costs += 10 * num_calls++;
11991 case CEE_MONO_LDPTR: {
11994 CHECK_STACK_OVF (1);
11996 token = read32 (ip + 2);
11998 ptr = mono_method_get_wrapper_data (method, token);
11999 /* FIXME: Generalize this */
12000 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
12001 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12006 EMIT_NEW_PCONST (cfg, ins, ptr);
12009 inline_costs += 10 * num_calls++;
12010 /* Can't embed random pointers into AOT code */
12014 case CEE_MONO_JIT_ICALL_ADDR: {
12015 MonoJitICallInfo *callinfo;
12018 CHECK_STACK_OVF (1);
12020 token = read32 (ip + 2);
12022 ptr = mono_method_get_wrapper_data (method, token);
12023 callinfo = mono_find_jit_icall_by_addr (ptr);
12024 g_assert (callinfo);
12025 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12028 inline_costs += 10 * num_calls++;
12031 case CEE_MONO_ICALL_ADDR: {
12032 MonoMethod *cmethod;
12035 CHECK_STACK_OVF (1);
12037 token = read32 (ip + 2);
12039 cmethod = mono_method_get_wrapper_data (method, token);
12041 if (cfg->compile_aot) {
12042 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12044 ptr = mono_lookup_internal_call (cmethod);
12046 EMIT_NEW_PCONST (cfg, ins, ptr);
12052 case CEE_MONO_VTADDR: {
12053 MonoInst *src_var, *src;
12059 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12060 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12065 case CEE_MONO_NEWOBJ: {
12066 MonoInst *iargs [2];
12068 CHECK_STACK_OVF (1);
12070 token = read32 (ip + 2);
12071 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12072 mono_class_init (klass);
12073 NEW_DOMAINCONST (cfg, iargs [0]);
12074 MONO_ADD_INS (cfg->cbb, iargs [0]);
12075 NEW_CLASSCONST (cfg, iargs [1], klass);
12076 MONO_ADD_INS (cfg->cbb, iargs [1]);
12077 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12079 inline_costs += 10 * num_calls++;
12082 case CEE_MONO_OBJADDR:
12085 MONO_INST_NEW (cfg, ins, OP_MOVE);
12086 ins->dreg = alloc_ireg_mp (cfg);
12087 ins->sreg1 = sp [0]->dreg;
12088 ins->type = STACK_MP;
12089 MONO_ADD_INS (cfg->cbb, ins);
12093 case CEE_MONO_LDNATIVEOBJ:
12095 * Similar to LDOBJ, but instead load the unmanaged
12096 * representation of the vtype to the stack.
12101 token = read32 (ip + 2);
12102 klass = mono_method_get_wrapper_data (method, token);
12103 g_assert (klass->valuetype);
12104 mono_class_init (klass);
12107 MonoInst *src, *dest, *temp;
12110 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12111 temp->backend.is_pinvoke = 1;
12112 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12113 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12115 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12116 dest->type = STACK_VTYPE;
12117 dest->klass = klass;
12123 case CEE_MONO_RETOBJ: {
12125 * Same as RET, but return the native representation of a vtype
12128 g_assert (cfg->ret);
12129 g_assert (mono_method_signature (method)->pinvoke);
12134 token = read32 (ip + 2);
12135 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12137 if (!cfg->vret_addr) {
12138 g_assert (cfg->ret_var_is_local);
12140 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12142 EMIT_NEW_RETLOADA (cfg, ins);
12144 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12146 if (sp != stack_start)
12149 MONO_INST_NEW (cfg, ins, OP_BR);
12150 ins->inst_target_bb = end_bblock;
12151 MONO_ADD_INS (bblock, ins);
12152 link_bblock (cfg, bblock, end_bblock);
12153 start_new_bblock = 1;
12157 case CEE_MONO_CISINST:
12158 case CEE_MONO_CCASTCLASS: {
12163 token = read32 (ip + 2);
12164 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12165 if (ip [1] == CEE_MONO_CISINST)
12166 ins = handle_cisinst (cfg, klass, sp [0]);
12168 ins = handle_ccastclass (cfg, klass, sp [0]);
12174 case CEE_MONO_SAVE_LMF:
12175 case CEE_MONO_RESTORE_LMF:
12176 #ifdef MONO_ARCH_HAVE_LMF_OPS
12177 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12178 MONO_ADD_INS (bblock, ins);
12179 cfg->need_lmf_area = TRUE;
12183 case CEE_MONO_CLASSCONST:
12184 CHECK_STACK_OVF (1);
12186 token = read32 (ip + 2);
12187 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12190 inline_costs += 10 * num_calls++;
12192 case CEE_MONO_NOT_TAKEN:
12193 bblock->out_of_line = TRUE;
12196 case CEE_MONO_TLS: {
12199 CHECK_STACK_OVF (1);
12201 key = (gint32)read32 (ip + 2);
12202 g_assert (key < TLS_KEY_NUM);
12204 ins = mono_create_tls_get (cfg, key);
12206 if (cfg->compile_aot) {
12208 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12209 ins->dreg = alloc_preg (cfg);
12210 ins->type = STACK_PTR;
12212 g_assert_not_reached ();
12215 ins->type = STACK_PTR;
12216 MONO_ADD_INS (bblock, ins);
12221 case CEE_MONO_DYN_CALL: {
12222 MonoCallInst *call;
12224 /* It would be easier to call a trampoline, but that would put an
12225 * extra frame on the stack, confusing exception handling. So
12226 * implement it inline using an opcode for now.
12229 if (!cfg->dyn_call_var) {
12230 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12231 /* prevent it from being register allocated */
12232 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12235 /* Has to use a call inst since it local regalloc expects it */
12236 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12237 ins = (MonoInst*)call;
12239 ins->sreg1 = sp [0]->dreg;
12240 ins->sreg2 = sp [1]->dreg;
12241 MONO_ADD_INS (bblock, ins);
12243 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12246 inline_costs += 10 * num_calls++;
12250 case CEE_MONO_MEMORY_BARRIER: {
12252 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12256 case CEE_MONO_JIT_ATTACH: {
12257 MonoInst *args [16], *domain_ins;
12258 MonoInst *ad_ins, *jit_tls_ins;
12259 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12261 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12263 EMIT_NEW_PCONST (cfg, ins, NULL);
12264 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12266 ad_ins = mono_get_domain_intrinsic (cfg);
12267 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12269 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12270 NEW_BBLOCK (cfg, next_bb);
12271 NEW_BBLOCK (cfg, call_bb);
12273 if (cfg->compile_aot) {
12274 /* AOT code is only used in the root domain */
12275 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12277 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12279 MONO_ADD_INS (cfg->cbb, ad_ins);
12280 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12283 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12288 MONO_START_BB (cfg, call_bb);
12291 if (cfg->compile_aot) {
12292 /* AOT code is only used in the root domain */
12293 EMIT_NEW_PCONST (cfg, args [0], NULL);
12295 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12297 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12301 MONO_START_BB (cfg, next_bb);
12307 case CEE_MONO_JIT_DETACH: {
12308 MonoInst *args [16];
12310 /* Restore the original domain */
12311 dreg = alloc_ireg (cfg);
12312 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12313 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12318 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12324 case CEE_PREFIX1: {
12327 case CEE_ARGLIST: {
12328 /* somewhat similar to LDTOKEN */
12329 MonoInst *addr, *vtvar;
12330 CHECK_STACK_OVF (1);
12331 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12333 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12334 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12336 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12337 ins->type = STACK_VTYPE;
12338 ins->klass = mono_defaults.argumenthandle_class;
12348 MonoInst *cmp, *arg1, *arg2;
12356 * The following transforms:
12357 * CEE_CEQ into OP_CEQ
12358 * CEE_CGT into OP_CGT
12359 * CEE_CGT_UN into OP_CGT_UN
12360 * CEE_CLT into OP_CLT
12361 * CEE_CLT_UN into OP_CLT_UN
12363 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12365 MONO_INST_NEW (cfg, ins, cmp->opcode);
12366 cmp->sreg1 = arg1->dreg;
12367 cmp->sreg2 = arg2->dreg;
12368 type_from_op (cfg, cmp, arg1, arg2);
12370 add_widen_op (cfg, cmp, &arg1, &arg2);
12371 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12372 cmp->opcode = OP_LCOMPARE;
12373 else if (arg1->type == STACK_R4)
12374 cmp->opcode = OP_RCOMPARE;
12375 else if (arg1->type == STACK_R8)
12376 cmp->opcode = OP_FCOMPARE;
12378 cmp->opcode = OP_ICOMPARE;
12379 MONO_ADD_INS (bblock, cmp);
12380 ins->type = STACK_I4;
12381 ins->dreg = alloc_dreg (cfg, ins->type);
12382 type_from_op (cfg, ins, arg1, arg2);
12384 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12386 * The backends expect the fceq opcodes to do the
12389 ins->sreg1 = cmp->sreg1;
12390 ins->sreg2 = cmp->sreg2;
12393 MONO_ADD_INS (bblock, ins);
12399 MonoInst *argconst;
12400 MonoMethod *cil_method;
12402 CHECK_STACK_OVF (1);
12404 n = read32 (ip + 2);
12405 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12406 if (!cmethod || mono_loader_get_last_error ())
12408 mono_class_init (cmethod->klass);
12410 mono_save_token_info (cfg, image, n, cmethod);
12412 context_used = mini_method_check_context_used (cfg, cmethod);
12414 cil_method = cmethod;
12415 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12416 METHOD_ACCESS_FAILURE (method, cil_method);
12418 if (mono_security_cas_enabled ()) {
12419 if (check_linkdemand (cfg, method, cmethod))
12420 INLINE_FAILURE ("linkdemand");
12421 CHECK_CFG_EXCEPTION;
12422 } else if (mono_security_core_clr_enabled ()) {
12423 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12427 * Optimize the common case of ldftn+delegate creation
12429 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12430 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12431 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12432 MonoInst *target_ins, *handle_ins;
12433 MonoMethod *invoke;
12434 int invoke_context_used;
12436 invoke = mono_get_delegate_invoke (ctor_method->klass);
12437 if (!invoke || !mono_method_signature (invoke))
12440 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12442 target_ins = sp [-1];
12444 if (mono_security_core_clr_enabled ())
12445 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12447 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12448 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12449 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12451 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12455 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12456 /* FIXME: SGEN support */
12457 if (invoke_context_used == 0) {
12459 if (cfg->verbose_level > 3)
12460 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12461 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12464 CHECK_CFG_EXCEPTION;
12475 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12476 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12480 inline_costs += 10 * num_calls++;
12483 case CEE_LDVIRTFTN: {
12484 MonoInst *args [2];
12488 n = read32 (ip + 2);
12489 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12490 if (!cmethod || mono_loader_get_last_error ())
12492 mono_class_init (cmethod->klass);
12494 context_used = mini_method_check_context_used (cfg, cmethod);
12496 if (mono_security_cas_enabled ()) {
12497 if (check_linkdemand (cfg, method, cmethod))
12498 INLINE_FAILURE ("linkdemand");
12499 CHECK_CFG_EXCEPTION;
12500 } else if (mono_security_core_clr_enabled ()) {
12501 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12505 * Optimize the common case of ldvirtftn+delegate creation
12507 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12508 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12509 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12510 MonoInst *target_ins, *handle_ins;
12511 MonoMethod *invoke;
12512 int invoke_context_used;
12514 invoke = mono_get_delegate_invoke (ctor_method->klass);
12515 if (!invoke || !mono_method_signature (invoke))
12518 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12520 target_ins = sp [-1];
12522 if (mono_security_core_clr_enabled ())
12523 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12525 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12526 /* FIXME: SGEN support */
12527 if (invoke_context_used == 0) {
12529 if (cfg->verbose_level > 3)
12530 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12531 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12534 CHECK_CFG_EXCEPTION;
12548 args [1] = emit_get_rgctx_method (cfg, context_used,
12549 cmethod, MONO_RGCTX_INFO_METHOD);
12552 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12554 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12557 inline_costs += 10 * num_calls++;
12561 CHECK_STACK_OVF (1);
12563 n = read16 (ip + 2);
12565 EMIT_NEW_ARGLOAD (cfg, ins, n);
12570 CHECK_STACK_OVF (1);
12572 n = read16 (ip + 2);
12574 NEW_ARGLOADA (cfg, ins, n);
12575 MONO_ADD_INS (cfg->cbb, ins);
12583 n = read16 (ip + 2);
12585 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12587 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12591 CHECK_STACK_OVF (1);
12593 n = read16 (ip + 2);
12595 EMIT_NEW_LOCLOAD (cfg, ins, n);
12600 unsigned char *tmp_ip;
12601 CHECK_STACK_OVF (1);
12603 n = read16 (ip + 2);
12606 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12612 EMIT_NEW_LOCLOADA (cfg, ins, n);
12621 n = read16 (ip + 2);
12623 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12625 emit_stloc_ir (cfg, sp, header, n);
12632 if (sp != stack_start)
12634 if (cfg->method != method)
12636 * Inlining this into a loop in a parent could lead to
12637 * stack overflows which is different behavior than the
12638 * non-inlined case, thus disable inlining in this case.
12640 INLINE_FAILURE("localloc");
12642 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12643 ins->dreg = alloc_preg (cfg);
12644 ins->sreg1 = sp [0]->dreg;
12645 ins->type = STACK_PTR;
12646 MONO_ADD_INS (cfg->cbb, ins);
12648 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12650 ins->flags |= MONO_INST_INIT;
12655 case CEE_ENDFILTER: {
12656 MonoExceptionClause *clause, *nearest;
12657 int cc, nearest_num;
12661 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12663 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12664 ins->sreg1 = (*sp)->dreg;
12665 MONO_ADD_INS (bblock, ins);
12666 start_new_bblock = 1;
12671 for (cc = 0; cc < header->num_clauses; ++cc) {
12672 clause = &header->clauses [cc];
12673 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12674 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12675 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12680 g_assert (nearest);
12681 if ((ip - header->code) != nearest->handler_offset)
12686 case CEE_UNALIGNED_:
12687 ins_flag |= MONO_INST_UNALIGNED;
12688 /* FIXME: record alignment? we can assume 1 for now */
12692 case CEE_VOLATILE_:
12693 ins_flag |= MONO_INST_VOLATILE;
12697 ins_flag |= MONO_INST_TAILCALL;
12698 cfg->flags |= MONO_CFG_HAS_TAIL;
12699 /* Can't inline tail calls at this time */
12700 inline_costs += 100000;
12707 token = read32 (ip + 2);
12708 klass = mini_get_class (method, token, generic_context);
12709 CHECK_TYPELOAD (klass);
12710 if (generic_class_is_reference_type (cfg, klass))
12711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12713 mini_emit_initobj (cfg, *sp, NULL, klass);
12717 case CEE_CONSTRAINED_:
12719 token = read32 (ip + 2);
12720 constrained_call = mini_get_class (method, token, generic_context);
12721 CHECK_TYPELOAD (constrained_call);
12725 case CEE_INITBLK: {
12726 MonoInst *iargs [3];
12730 /* Skip optimized paths for volatile operations. */
12731 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12732 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12733 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12734 /* emit_memset only works when val == 0 */
12735 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12738 iargs [0] = sp [0];
12739 iargs [1] = sp [1];
12740 iargs [2] = sp [2];
12741 if (ip [1] == CEE_CPBLK) {
12743 * FIXME: It's unclear whether we should be emitting both the acquire
12744 * and release barriers for cpblk. It is technically both a load and
12745 * store operation, so it seems like that's the sensible thing to do.
12747 * FIXME: We emit full barriers on both sides of the operation for
12748 * simplicity. We should have a separate atomic memcpy method instead.
12750 MonoMethod *memcpy_method = get_memcpy_method ();
12752 if (ins_flag & MONO_INST_VOLATILE)
12753 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12755 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12756 call->flags |= ins_flag;
12758 if (ins_flag & MONO_INST_VOLATILE)
12759 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12761 MonoMethod *memset_method = get_memset_method ();
12762 if (ins_flag & MONO_INST_VOLATILE) {
12763 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12764 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12766 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12767 call->flags |= ins_flag;
12778 ins_flag |= MONO_INST_NOTYPECHECK;
12780 ins_flag |= MONO_INST_NORANGECHECK;
12781 /* we ignore the no-nullcheck for now since we
12782 * really do it explicitly only when doing callvirt->call
12786 case CEE_RETHROW: {
12788 int handler_offset = -1;
12790 for (i = 0; i < header->num_clauses; ++i) {
12791 MonoExceptionClause *clause = &header->clauses [i];
12792 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12793 handler_offset = clause->handler_offset;
12798 bblock->flags |= BB_EXCEPTION_UNSAFE;
12800 if (handler_offset == -1)
12803 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12804 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12805 ins->sreg1 = load->dreg;
12806 MONO_ADD_INS (bblock, ins);
12808 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12809 MONO_ADD_INS (bblock, ins);
12812 link_bblock (cfg, bblock, end_bblock);
12813 start_new_bblock = 1;
12821 CHECK_STACK_OVF (1);
12823 token = read32 (ip + 2);
12824 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12825 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12828 val = mono_type_size (type, &ialign);
12830 MonoClass *klass = mini_get_class (method, token, generic_context);
12831 CHECK_TYPELOAD (klass);
12833 val = mono_type_size (&klass->byval_arg, &ialign);
12835 if (mini_is_gsharedvt_klass (cfg, klass))
12836 GSHAREDVT_FAILURE (*ip);
12838 EMIT_NEW_ICONST (cfg, ins, val);
12843 case CEE_REFANYTYPE: {
12844 MonoInst *src_var, *src;
12846 GSHAREDVT_FAILURE (*ip);
12852 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12854 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12855 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12856 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12861 case CEE_READONLY_:
12874 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12884 g_warning ("opcode 0x%02x not handled", *ip);
12888 if (start_new_bblock != 1)
12891 bblock->cil_length = ip - bblock->cil_code;
12892 if (bblock->next_bb) {
12893 /* This could already be set because of inlining, #693905 */
12894 MonoBasicBlock *bb = bblock;
12896 while (bb->next_bb)
12898 bb->next_bb = end_bblock;
12900 bblock->next_bb = end_bblock;
12903 if (cfg->method == method && cfg->domainvar) {
12905 MonoInst *get_domain;
12907 cfg->cbb = init_localsbb;
12909 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12910 MONO_ADD_INS (cfg->cbb, get_domain);
12912 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12914 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12915 MONO_ADD_INS (cfg->cbb, store);
12918 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12919 if (cfg->compile_aot)
12920 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12921 mono_get_got_var (cfg);
12924 if (cfg->method == method && cfg->got_var)
12925 mono_emit_load_got_addr (cfg);
12927 if (init_localsbb) {
12928 cfg->cbb = init_localsbb;
12930 for (i = 0; i < header->num_locals; ++i) {
12931 emit_init_local (cfg, i, header->locals [i], init_locals);
12935 if (cfg->init_ref_vars && cfg->method == method) {
12936 /* Emit initialization for ref vars */
12937 // FIXME: Avoid duplication initialization for IL locals.
12938 for (i = 0; i < cfg->num_varinfo; ++i) {
12939 MonoInst *ins = cfg->varinfo [i];
12941 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12942 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12946 if (cfg->lmf_var && cfg->method == method) {
12947 cfg->cbb = init_localsbb;
12948 emit_push_lmf (cfg);
12951 cfg->cbb = init_localsbb;
12952 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12955 MonoBasicBlock *bb;
12958 * Make seq points at backward branch targets interruptable.
12960 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12961 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12962 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12965 /* Add a sequence point for method entry/exit events */
12966 if (cfg->gen_seq_points_debug_data) {
12967 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12968 MONO_ADD_INS (init_localsbb, ins);
12969 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12970 MONO_ADD_INS (cfg->bb_exit, ins);
12974 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12975 * the code they refer to was dead (#11880).
12977 if (sym_seq_points) {
12978 for (i = 0; i < header->code_size; ++i) {
12979 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12982 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12983 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12990 if (cfg->method == method) {
12991 MonoBasicBlock *bb;
12992 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12993 bb->region = mono_find_block_region (cfg, bb->real_offset);
12995 mono_create_spvar_for_region (cfg, bb->region);
12996 if (cfg->verbose_level > 2)
12997 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13001 if (inline_costs < 0) {
13004 /* Method is too large */
13005 mname = mono_method_full_name (method, TRUE);
13006 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13007 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13011 if ((cfg->verbose_level > 2) && (cfg->method == method))
13012 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13017 g_assert (!mono_error_ok (&cfg->error));
13021 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13025 set_exception_type_from_invalid_il (cfg, method, ip);
13029 g_slist_free (class_inits);
13030 mono_basic_block_free (original_bb);
13031 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13032 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13033 if (cfg->exception_type)
13036 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * for use when the value being stored is known to be a constant.
 * Unhandled opcodes hit g_assert_not_reached ().
 * NOTE(review): this listing is line-sampled; the switch header, braces
 * and the default label are elided from view.
 */
13040 store_membase_reg_to_store_membase_imm (int opcode)
13043 case OP_STORE_MEMBASE_REG:
13044 return OP_STORE_MEMBASE_IMM;
13045 case OP_STOREI1_MEMBASE_REG:
13046 return OP_STOREI1_MEMBASE_IMM;
13047 case OP_STOREI2_MEMBASE_REG:
13048 return OP_STOREI2_MEMBASE_IMM;
13049 case OP_STOREI4_MEMBASE_REG:
13050 return OP_STOREI4_MEMBASE_IMM;
13051 case OP_STOREI8_MEMBASE_REG:
13052 return OP_STOREI8_MEMBASE_IMM;
13054 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to the variant taking an immediate second
 * operand (e.g. integer/long ALU ops, shifts, compares, membase stores,
 * and a few x86/amd64-specific opcodes), so constants can be folded into
 * the instruction instead of being loaded into a register first.
 * NOTE(review): this listing is line-sampled; the switch header, most
 * case labels and the fall-through default are elided — presumably the
 * default returns -1 for opcodes without an immediate form (verify
 * against the full source).
 */
13061 mono_op_to_op_imm (int opcode)
13065 return OP_IADD_IMM;
13067 return OP_ISUB_IMM;
13069 return OP_IDIV_IMM;
13071 return OP_IDIV_UN_IMM;
13073 return OP_IREM_IMM;
13075 return OP_IREM_UN_IMM;
13077 return OP_IMUL_IMM;
13079 return OP_IAND_IMM;
13083 return OP_IXOR_IMM;
13085 return OP_ISHL_IMM;
13087 return OP_ISHR_IMM;
13089 return OP_ISHR_UN_IMM;
13092 return OP_LADD_IMM;
13094 return OP_LSUB_IMM;
13096 return OP_LAND_IMM;
13100 return OP_LXOR_IMM;
13102 return OP_LSHL_IMM;
13104 return OP_LSHR_IMM;
13106 return OP_LSHR_UN_IMM;
/* 64-bit long remainder by immediate only exists on 64-bit registers */
13107 #if SIZEOF_REGISTER == 8
13109 return OP_LREM_IMM;
13113 return OP_COMPARE_IMM;
13115 return OP_ICOMPARE_IMM;
13117 return OP_LCOMPARE_IMM;
13119 case OP_STORE_MEMBASE_REG:
13120 return OP_STORE_MEMBASE_IMM;
13121 case OP_STOREI1_MEMBASE_REG:
13122 return OP_STOREI1_MEMBASE_IMM;
13123 case OP_STOREI2_MEMBASE_REG:
13124 return OP_STOREI2_MEMBASE_IMM;
13125 case OP_STOREI4_MEMBASE_REG:
13126 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes with immediate forms */
13128 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13130 return OP_X86_PUSH_IMM;
13131 case OP_X86_COMPARE_MEMBASE_REG:
13132 return OP_X86_COMPARE_MEMBASE_IMM;
13134 #if defined(TARGET_AMD64)
13135 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13136 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13138 case OP_VOIDCALL_REG:
13139 return OP_VOIDCALL;
13147 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * machine-IR OP_LOAD*_MEMBASE opcode. Both LDIND_I and LDIND_REF map
 * to the pointer-sized OP_LOAD_MEMBASE. Unhandled opcodes assert.
 * NOTE(review): this listing is line-sampled; the switch header and
 * most case labels are elided from view.
 */
13154 ldind_to_load_membase (int opcode)
13158 return OP_LOADI1_MEMBASE;
13160 return OP_LOADU1_MEMBASE;
13162 return OP_LOADI2_MEMBASE;
13164 return OP_LOADU2_MEMBASE;
13166 return OP_LOADI4_MEMBASE;
13168 return OP_LOADU4_MEMBASE;
13170 return OP_LOAD_MEMBASE;
13171 case CEE_LDIND_REF:
13172 return OP_LOAD_MEMBASE;
13174 return OP_LOADI8_MEMBASE;
13176 return OP_LOADR4_MEMBASE;
13178 return OP_LOADR8_MEMBASE;
13180 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * machine-IR OP_STORE*_MEMBASE_REG opcode; STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG. Unhandled opcodes assert.
 * NOTE(review): this listing is line-sampled; the switch header and
 * most case labels are elided from view.
 */
13187 stind_to_store_membase (int opcode)
13191 return OP_STOREI1_MEMBASE_REG;
13193 return OP_STOREI2_MEMBASE_REG;
13195 return OP_STOREI4_MEMBASE_REG;
13197 case CEE_STIND_REF:
13198 return OP_STORE_MEMBASE_REG;
13200 return OP_STOREI8_MEMBASE_REG;
13202 return OP_STORER4_MEMBASE_REG;
13204 return OP_STORER8_MEMBASE_REG;
13206 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to its absolute-address OP_LOAD*_MEM
 * variant, used when the base register is known to hold a constant
 * address. Only x86/amd64 support these opcodes (see the FIXME about
 * a MONO_ARCH_HAVE_LOAD_MEM macro); the 8-byte form needs 64-bit regs.
 * NOTE(review): this listing is line-sampled; the switch header and the
 * fall-through return (presumably -1 when no _MEM form exists — verify)
 * are elided from view.
 */
13213 mono_load_membase_to_load_mem (int opcode)
13215 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13216 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13218 case OP_LOAD_MEMBASE:
13219 return OP_LOAD_MEM;
13220 case OP_LOADU1_MEMBASE:
13221 return OP_LOADU1_MEM;
13222 case OP_LOADU2_MEMBASE:
13223 return OP_LOADU2_MEM;
13224 case OP_LOADI4_MEMBASE:
13225 return OP_LOADI4_MEM;
13226 case OP_LOADU4_MEMBASE:
13227 return OP_LOADU4_MEM;
13228 #if SIZEOF_REGISTER == 8
13229 case OP_LOADI8_MEMBASE:
13230 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back to memory
 * by STORE_OPCODE, return the x86/amd64 read-modify-write *_MEMBASE_REG /
 * *_MEMBASE_IMM opcode that operates on memory directly, fusing the
 * load-op-store sequence into one instruction. The guard checks reject
 * store widths the fused forms cannot express (byte/halfword stores).
 * NOTE(review): this listing is line-sampled; switch headers, case
 * labels and the fall-through default are elided from view.
 */
13239 op_to_op_dest_membase (int store_opcode, int opcode)
13241 #if defined(TARGET_X86)
/* On x86 only pointer-sized and 4-byte stores can be fused */
13242 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13247 return OP_X86_ADD_MEMBASE_REG;
13249 return OP_X86_SUB_MEMBASE_REG;
13251 return OP_X86_AND_MEMBASE_REG;
13253 return OP_X86_OR_MEMBASE_REG;
13255 return OP_X86_XOR_MEMBASE_REG;
13258 return OP_X86_ADD_MEMBASE_IMM;
13261 return OP_X86_SUB_MEMBASE_IMM;
13264 return OP_X86_AND_MEMBASE_IMM;
13267 return OP_X86_OR_MEMBASE_IMM;
13270 return OP_X86_XOR_MEMBASE_IMM;
13276 #if defined(TARGET_AMD64)
/* On amd64 8-byte stores can be fused as well */
13277 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13282 return OP_X86_ADD_MEMBASE_REG;
13284 return OP_X86_SUB_MEMBASE_REG;
13286 return OP_X86_AND_MEMBASE_REG;
13288 return OP_X86_OR_MEMBASE_REG;
13290 return OP_X86_XOR_MEMBASE_REG;
13292 return OP_X86_ADD_MEMBASE_IMM;
13294 return OP_X86_SUB_MEMBASE_IMM;
13296 return OP_X86_AND_MEMBASE_IMM;
13298 return OP_X86_OR_MEMBASE_IMM;
13300 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants of the same fused forms */
13302 return OP_AMD64_ADD_MEMBASE_REG;
13304 return OP_AMD64_SUB_MEMBASE_REG;
13306 return OP_AMD64_AND_MEMBASE_REG;
13308 return OP_AMD64_OR_MEMBASE_REG;
13310 return OP_AMD64_XOR_MEMBASE_REG;
13313 return OP_AMD64_ADD_MEMBASE_IMM;
13316 return OP_AMD64_SUB_MEMBASE_IMM;
13319 return OP_AMD64_AND_MEMBASE_IMM;
13322 return OP_AMD64_OR_MEMBASE_IMM;
13325 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   When the result of a compare-and-set opcode is stored straight to a
 * byte location, return the x86/amd64 SETcc-to-memory opcode so the
 * boolean is written without going through a register. Only the
 * OP_CEQ/OP_CNE cases feeding a 1-byte store are visible here.
 * NOTE(review): this listing is line-sampled; the switch header, case
 * labels and the fall-through default are elided from view.
 */
13335 op_to_op_store_membase (int store_opcode, int opcode)
13337 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13340 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13341 return OP_X86_SETEQ_MEMBASE;
13343 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13344 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding an opcode's FIRST source register into a
 * *_MEMBASE form of that opcode (push/compare reading memory directly),
 * given the load opcode that produced the operand. Load widths the
 * fused instruction cannot express are rejected by the guards. The
 * ilp32 special cases keep 8-byte loads out of 4-byte compares.
 * NOTE(review): this listing is line-sampled; switch headers, several
 * case labels and the fall-through default are elided from view.
 */
13352 op_to_op_src1_membase (int load_opcode, int opcode)
13355 /* FIXME: This has sign extension issues */
/* A byte compare against an immediate can read memory directly */
13357 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13358 return OP_X86_COMPARE_MEMBASE8_IMM;
13361 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13366 return OP_X86_PUSH_MEMBASE;
13367 case OP_COMPARE_IMM:
13368 case OP_ICOMPARE_IMM:
13369 return OP_X86_COMPARE_MEMBASE_IMM;
13372 return OP_X86_COMPARE_MEMBASE_REG;
13376 #ifdef TARGET_AMD64
13377 /* FIXME: This has sign extension issues */
13379 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13380 return OP_X86_COMPARE_MEMBASE8_IMM;
13385 #ifdef __mono_ilp32__
13386 if (load_opcode == OP_LOADI8_MEMBASE)
13388 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13390 return OP_X86_PUSH_MEMBASE;
13392 /* FIXME: This only works for 32 bit immediates
13393 case OP_COMPARE_IMM:
13394 case OP_LCOMPARE_IMM:
13395 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13396 return OP_AMD64_COMPARE_MEMBASE_IMM;
13398 case OP_ICOMPARE_IMM:
13399 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13400 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13404 #ifdef __mono_ilp32__
/* Under ilp32, OP_LOAD_MEMBASE is 4 bytes, so use the 32-bit compare */
13405 if (load_opcode == OP_LOAD_MEMBASE)
13406 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13407 if (load_opcode == OP_LOADI8_MEMBASE)
13409 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13411 return OP_AMD64_COMPARE_MEMBASE_REG;
13414 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13415 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding an opcode's SECOND source register into a
 * *_REG_MEMBASE form of that opcode (compare/ALU with a memory operand),
 * given the load opcode that produced the operand. 32-bit loads map to
 * the X86_* forms, 64-bit loads to the AMD64_* forms; the ilp32 branches
 * adjust which widths OP_LOAD_MEMBASE counts as.
 * NOTE(review): this listing is line-sampled; switch headers, case
 * labels and the fall-through default are elided from view.
 */
13424 op_to_op_src2_membase (int load_opcode, int opcode)
13427 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13433 return OP_X86_COMPARE_REG_MEMBASE;
13435 return OP_X86_ADD_REG_MEMBASE;
13437 return OP_X86_SUB_REG_MEMBASE;
13439 return OP_X86_AND_REG_MEMBASE;
13441 return OP_X86_OR_REG_MEMBASE;
13443 return OP_X86_XOR_REG_MEMBASE;
13447 #ifdef TARGET_AMD64
13448 #ifdef __mono_ilp32__
13449 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13451 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13455 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13457 return OP_X86_ADD_REG_MEMBASE;
13459 return OP_X86_SUB_REG_MEMBASE;
13461 return OP_X86_AND_REG_MEMBASE;
13463 return OP_X86_OR_REG_MEMBASE;
13465 return OP_X86_XOR_REG_MEMBASE;
13467 #ifdef __mono_ilp32__
13468 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13470 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13475 return OP_AMD64_COMPARE_REG_MEMBASE;
13477 return OP_AMD64_ADD_REG_MEMBASE;
13479 return OP_AMD64_SUB_REG_MEMBASE;
13481 return OP_AMD64_AND_REG_MEMBASE;
13483 return OP_AMD64_OR_REG_MEMBASE;
13485 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that this
 * architecture emulates in software (long shifts on 32-bit registers,
 * mul/div/rem under MONO_ARCH_EMULATE_*): converting those to immediate
 * forms would bypass the emulation path. All other opcodes are delegated
 * to mono_op_to_op_imm ().
 * NOTE(review): this listing is line-sampled; the switch header and the
 * case labels guarded by these #ifs (presumably returning -1) are
 * elided from view.
 */
13494 mono_op_to_op_imm_noemul (int opcode)
13497 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13503 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13510 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13515 return mono_op_to_op_imm (opcode);
13520 * mono_handle_global_vregs:
13522 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * variable for them; conversely, demote variables accessed in only one
 * bblock back to local vregs, then compact the varinfo/vars tables.
 * NOTE(review): this listing is line-sampled; braces, some labels and
 * declarations are elided from view.
 */
13526 mono_handle_global_vregs (MonoCompile *cfg)
13528 gint32 *vreg_to_bb;
13529 MonoBasicBlock *bb;
/* vreg_to_bb maps vreg -> (block_num + 1) of the single bb using it, or -1
 * once the vreg is seen in a second bb. Elements are gint32, so size the
 * allocation by sizeof (gint32); the old code sized it by sizeof (gint32*),
 * over-allocating on 64-bit (harmless but wrong sizeof idiom). */
13532 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
13534 #ifdef MONO_ARCH_SIMD_INTRINSICS
13535 if (cfg->uses_simd_intrinsics)
13536 mono_simd_simplify_indirection (cfg);
13539 /* Find local vregs used in more than one bb */
13540 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13541 MonoInst *ins = bb->code;
13542 int block_num = bb->block_num;
13544 if (cfg->verbose_level > 2)
13545 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13548 for (; ins; ins = ins->next) {
13549 const char *spec = INS_INFO (ins->opcode);
13550 int regtype = 0, regindex;
13553 if (G_UNLIKELY (cfg->verbose_level > 2))
13554 mono_print_ins (ins);
/* By this pass, all CIL-level opcodes must already be lowered */
13556 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 as described by the spec string */
13558 for (regindex = 0; regindex < 4; regindex ++) {
13561 if (regindex == 0) {
13562 regtype = spec [MONO_INST_DEST];
13563 if (regtype == ' ')
13566 } else if (regindex == 1) {
13567 regtype = spec [MONO_INST_SRC1];
13568 if (regtype == ' ')
13571 } else if (regindex == 2) {
13572 regtype = spec [MONO_INST_SRC2];
13573 if (regtype == ' ')
13576 } else if (regindex == 3) {
13577 regtype = spec [MONO_INST_SRC3];
13578 if (regtype == ' ')
13583 #if SIZEOF_REGISTER == 4
13584 /* In the LLVM case, the long opcodes are not decomposed */
13585 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13587 * Since some instructions reference the original long vreg,
13588 * and some reference the two component vregs, it is quite hard
13589 * to determine when it needs to be global. So be conservative.
13591 if (!get_vreg_to_inst (cfg, vreg)) {
13592 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13594 if (cfg->verbose_level > 2)
13595 printf ("LONG VREG R%d made global.\n", vreg);
13599 * Make the component vregs volatile since the optimizations can
13600 * get confused otherwise.
13602 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13603 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13607 g_assert (vreg != -1);
13609 prev_bb = vreg_to_bb [vreg];
13610 if (prev_bb == 0) {
13611 /* 0 is a valid block num */
13612 vreg_to_bb [vreg] = block_num + 1;
13613 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13614 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13617 if (!get_vreg_to_inst (cfg, vreg)) {
13618 if (G_UNLIKELY (cfg->verbose_level > 2))
13619 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable's managed type from the regtype/ins */
13623 if (vreg_is_ref (cfg, vreg))
13624 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13626 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13629 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13632 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13635 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13638 g_assert_not_reached ();
13642 /* Flag as having been used in more than one bb */
13643 vreg_to_bb [vreg] = -1;
13649 /* If a variable is used in only one bblock, convert it into a local vreg */
13650 for (i = 0; i < cfg->num_varinfo; i++) {
13651 MonoInst *var = cfg->varinfo [i];
13652 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13654 switch (var->type) {
13660 #if SIZEOF_REGISTER == 8
13663 #if !defined(TARGET_X86)
13664 /* Enabling this screws up the fp stack on x86 */
13667 if (mono_arch_is_soft_float ())
13670 /* Arguments are implicitly global */
13671 /* Putting R4 vars into registers doesn't work currently */
13672 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13673 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13675 * Make that the variable's liveness interval doesn't contain a call, since
13676 * that would cause the lvreg to be spilled, making the whole optimization
13679 /* This is too slow for JIT compilation */
13681 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13683 int def_index, call_index, ins_index;
13684 gboolean spilled = FALSE;
13689 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13690 const char *spec = INS_INFO (ins->opcode);
13692 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13693 def_index = ins_index;
13695 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* BUGFIX: the second disjunct previously re-tested SRC1/sreg1 (a
 * copy-paste duplicate), so uses of the variable in the second
 * source register were invisible to this spill check. */
13696 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
13697 if (call_index > def_index) {
13703 if (MONO_IS_CALL (ins))
13704 call_index = ins_index;
13714 if (G_UNLIKELY (cfg->verbose_level > 2))
13715 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13716 var->flags |= MONO_INST_IS_DEAD;
13717 cfg->vreg_to_inst [var->dreg] = NULL;
13724 * Compress the varinfo and vars tables so the liveness computation is faster and
13725 * takes up less space.
13728 for (i = 0; i < cfg->num_varinfo; ++i) {
13729 MonoInst *var = cfg->varinfo [i];
13730 if (pos < i && cfg->locals_start == i)
13731 cfg->locals_start = pos;
13732 if (!(var->flags & MONO_INST_IS_DEAD)) {
13734 cfg->varinfo [pos] = cfg->varinfo [i];
13735 cfg->varinfo [pos]->inst_c0 = pos;
13736 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13737 cfg->vars [pos].idx = pos;
13738 #if SIZEOF_REGISTER == 4
13739 if (cfg->varinfo [pos]->type == STACK_I8) {
13740 /* Modify the two component vars too */
13743 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13744 var1->inst_c0 = pos;
13745 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13746 var1->inst_c0 = pos;
13753 cfg->num_varinfo = pos;
13754 if (cfg->locals_start > cfg->num_varinfo)
13755 cfg->locals_start = cfg->num_varinfo;
13759 * mono_spill_global_vars:
13761 * Generate spill code for variables which are not allocated to registers,
13762 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13763 * code is generated which could be optimized by the local optimization passes.
13766 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13768 MonoBasicBlock *bb;
13770 int orig_next_vreg;
13771 guint32 *vreg_to_lvreg;
13773 guint32 i, lvregs_len;
13774 gboolean dest_has_lvreg = FALSE;
13775 guint32 stacktypes [128];
13776 MonoInst **live_range_start, **live_range_end;
13777 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13778 int *gsharedvt_vreg_to_idx = NULL;
13780 *need_local_opts = FALSE;
13782 memset (spec2, 0, sizeof (spec2));
/* Map the ins-spec register-type characters to stack types, used when allocating dregs below. */
13784 /* FIXME: Move this function to mini.c */
13785 stacktypes ['i'] = STACK_PTR;
13786 stacktypes ['l'] = STACK_I8;
13787 stacktypes ['f'] = STACK_R8;
13788 #ifdef MONO_ARCH_SIMD_INTRINSICS
13789 stacktypes ['x'] = STACK_VTYPE;
13792 #if SIZEOF_REGISTER == 4
13793 /* Create MonoInsts for longs */
13794 for (i = 0; i < cfg->num_varinfo; i++) {
13795 MonoInst *ins = cfg->varinfo [i];
13797 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13798 switch (ins->type) {
13803 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13806 g_assert (ins->opcode == OP_REGOFFSET);
/* On 32-bit targets a 64-bit var occupies two component vregs (dreg+1, dreg+2); give each its own stack slot half. */
13808 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13810 tree->opcode = OP_REGOFFSET;
13811 tree->inst_basereg = ins->inst_basereg;
13812 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13814 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13816 tree->opcode = OP_REGOFFSET;
13817 tree->inst_basereg = ins->inst_basereg;
13818 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13828 if (cfg->compute_gc_maps) {
13829 /* registers need liveness info even for !non refs */
13830 for (i = 0; i < cfg->num_varinfo; i++) {
13831 MonoInst *ins = cfg->varinfo [i];
13833 if (ins->opcode == OP_REGVAR)
13834 ins->flags |= MONO_INST_GC_TRACK;
/*
 * For gsharedvt methods, map each gsharedvt-typed variable's dreg to an info-slot index
 * (idx + 1 for locals, -1 for args passed by ref); used by the OP_LDADDR handling below.
 */
13838 if (cfg->gsharedvt) {
13839 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13841 for (i = 0; i < cfg->num_varinfo; ++i) {
13842 MonoInst *ins = cfg->varinfo [i];
13845 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13846 if (i >= cfg->locals_start) {
13848 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13849 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13850 ins->opcode = OP_GSHAREDVT_LOCAL;
13851 ins->inst_imm = idx;
13854 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13855 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13861 /* FIXME: widening and truncation */
13864 * As an optimization, when a variable allocated to the stack is first loaded into
13865 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13866 * the variable again.
13868 orig_next_vreg = cfg->next_vreg;
13869 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13870 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13874 * These arrays contain the first and last instructions accessing a given
13876 * Since we emit bblocks in the same order we process them here, and we
13877 * don't split live ranges, these will precisely describe the live range of
13878 * the variable, i.e. the instruction range where a valid value can be found
13879 * in the variables location.
13880 * The live range is computed using the liveness info computed by the liveness pass.
13881 * We can't use vmv->range, since that is an abstract live range, and we need
13882 * one which is instruction precise.
13883 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13885 /* FIXME: Only do this if debugging info is requested */
13886 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13887 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13888 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13889 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13891 /* Add spill loads/stores */
13892 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13895 if (cfg->verbose_level > 2)
13896 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock; reset it at each bblock boundary. */
13898 /* Clear vreg_to_lvreg array */
13899 for (i = 0; i < lvregs_len; i++)
13900 vreg_to_lvreg [lvregs [i]] = 0;
13904 MONO_BB_FOR_EACH_INS (bb, ins) {
13905 const char *spec = INS_INFO (ins->opcode);
13906 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13907 gboolean store, no_lvreg;
13908 int sregs [MONO_MAX_SRC_REGS];
13910 if (G_UNLIKELY (cfg->verbose_level > 2))
13911 mono_print_ins (ins);
13913 if (ins->opcode == OP_NOP)
13917 * We handle LDADDR here as well, since it can only be decomposed
13918 * when variable addresses are known.
13920 if (ins->opcode == OP_LDADDR) {
13921 MonoInst *var = ins->inst_p0;
13923 if (var->opcode == OP_VTARG_ADDR) {
13924 /* Happens on SPARC/S390 where vtypes are passed by reference */
13925 MonoInst *vtaddr = var->inst_left;
13926 if (vtaddr->opcode == OP_REGVAR) {
13927 ins->opcode = OP_MOVE;
13928 ins->sreg1 = vtaddr->dreg;
13930 else if (var->inst_left->opcode == OP_REGOFFSET) {
13931 ins->opcode = OP_LOAD_MEMBASE;
13932 ins->inst_basereg = vtaddr->inst_basereg;
13933 ins->inst_offset = vtaddr->inst_offset;
13936 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13937 /* gsharedvt arg passed by ref */
13938 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13940 ins->opcode = OP_LOAD_MEMBASE;
13941 ins->inst_basereg = var->inst_basereg;
13942 ins->inst_offset = var->inst_offset;
13943 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13944 MonoInst *load, *load2, *load3;
13945 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13946 int reg1, reg2, reg3;
13947 MonoInst *info_var = cfg->gsharedvt_info_var;
13948 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13952 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13955 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13957 g_assert (info_var);
13958 g_assert (locals_var);
13960 /* Mark the instruction used to compute the locals var as used */
13961 cfg->gsharedvt_locals_var_ins = NULL;
13963 /* Load the offset */
13964 if (info_var->opcode == OP_REGOFFSET) {
13965 reg1 = alloc_ireg (cfg);
13966 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13967 } else if (info_var->opcode == OP_REGVAR) {
13969 reg1 = info_var->dreg;
13971 g_assert_not_reached ();
13973 reg2 = alloc_ireg (cfg);
13974 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13975 /* Load the locals area address */
13976 reg3 = alloc_ireg (cfg);
13977 if (locals_var->opcode == OP_REGOFFSET) {
13978 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13979 } else if (locals_var->opcode == OP_REGVAR) {
13980 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13982 g_assert_not_reached ();
13984 /* Compute the address */
13985 ins->opcode = OP_PADD;
13989 mono_bblock_insert_before_ins (bb, ins, load3);
13990 mono_bblock_insert_before_ins (bb, load3, load2);
13992 mono_bblock_insert_before_ins (bb, load2, load);
13994 g_assert (var->opcode == OP_REGOFFSET);
13996 ins->opcode = OP_ADD_IMM;
13997 ins->sreg1 = var->inst_basereg;
13998 ins->inst_imm = var->inst_offset;
14001 *need_local_opts = TRUE;
14002 spec = INS_INFO (ins->opcode);
/* At this point no CIL-level opcode should remain; they must all have been lowered. */
14005 if (ins->opcode < MONO_CEE_LAST) {
14006 mono_print_ins (ins);
14007 g_assert_not_reached ();
14011 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14015 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the stored value is processed as a source below; swapped back afterwards. */
14016 tmp_reg = ins->dreg;
14017 ins->dreg = ins->sreg2;
14018 ins->sreg2 = tmp_reg;
14021 spec2 [MONO_INST_DEST] = ' ';
14022 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14023 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14024 spec2 [MONO_INST_SRC3] = ' ';
14026 } else if (MONO_IS_STORE_MEMINDEX (ins))
14027 g_assert_not_reached ();
14032 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14033 printf ("\t %.3s %d", spec, ins->dreg);
14034 num_sregs = mono_inst_get_src_registers (ins, sregs);
14035 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14036 printf (" %d", sregs [srcindex]);
/*
 * DREG: if the destination vreg maps to a variable, either fuse the spill store
 * into the instruction itself, or emit an explicit store after it.
 */
14043 regtype = spec [MONO_INST_DEST];
14044 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14047 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14048 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14049 MonoInst *store_ins;
14051 MonoInst *def_ins = ins;
14052 int dreg = ins->dreg; /* The original vreg */
14054 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14056 if (var->opcode == OP_REGVAR) {
/* The variable lives in a register: just rewrite the vreg to the allocated hreg. */
14057 ins->dreg = var->dreg;
14058 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14060 * Instead of emitting a load+store, use a _membase opcode.
14062 g_assert (var->opcode == OP_REGOFFSET);
14063 if (ins->opcode == OP_MOVE) {
14067 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14068 ins->inst_basereg = var->inst_basereg;
14069 ins->inst_offset = var->inst_offset;
14072 spec = INS_INFO (ins->opcode);
14076 g_assert (var->opcode == OP_REGOFFSET);
14078 prev_dreg = ins->dreg;
14080 /* Invalidate any previous lvreg for this vreg */
14081 vreg_to_lvreg [ins->dreg] = 0;
14085 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14087 store_opcode = OP_STOREI8_MEMBASE_REG;
14090 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14092 #if SIZEOF_REGISTER != 8
14093 if (regtype == 'l') {
/* 64-bit result on a 32-bit target: store both 32-bit halves separately. */
14094 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14095 mono_bblock_insert_after_ins (bb, ins, store_ins);
14096 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14097 mono_bblock_insert_after_ins (bb, ins, store_ins);
14098 def_ins = store_ins;
14103 g_assert (store_opcode != OP_STOREV_MEMBASE);
14105 /* Try to fuse the store into the instruction itself */
14106 /* FIXME: Add more instructions */
14107 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14108 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14109 ins->inst_imm = ins->inst_c0;
14110 ins->inst_destbasereg = var->inst_basereg;
14111 ins->inst_offset = var->inst_offset;
14112 spec = INS_INFO (ins->opcode);
14113 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A plain move into the variable becomes a direct store of the source. */
14114 ins->opcode = store_opcode;
14115 ins->inst_destbasereg = var->inst_basereg;
14116 ins->inst_offset = var->inst_offset;
14120 tmp_reg = ins->dreg;
14121 ins->dreg = ins->sreg2;
14122 ins->sreg2 = tmp_reg;
14125 spec2 [MONO_INST_DEST] = ' ';
14126 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14127 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14128 spec2 [MONO_INST_SRC3] = ' ';
14130 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14131 // FIXME: The backends expect the base reg to be in inst_basereg
14132 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14134 ins->inst_basereg = var->inst_basereg;
14135 ins->inst_offset = var->inst_offset;
14136 spec = INS_INFO (ins->opcode);
14138 /* printf ("INS: "); mono_print_ins (ins); */
14139 /* Create a store instruction */
14140 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14142 /* Insert it after the instruction */
14143 mono_bblock_insert_after_ins (bb, ins, store_ins);
14145 def_ins = store_ins;
14148 * We can't assign ins->dreg to var->dreg here, since the
14149 * sregs could use it. So set a flag, and do it after
14152 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14153 dest_has_lvreg = TRUE;
/* Record the start of the variable's instruction-precise live range. */
14158 if (def_ins && !live_range_start [dreg]) {
14159 live_range_start [dreg] = def_ins;
14160 live_range_start_bb [dreg] = bb;
14163 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14166 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14167 tmp->inst_c1 = dreg;
14168 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/*
 * SREGS: replace source vregs which map to variables with the allocated hreg,
 * a cached lvreg, a fused _membase form, or an explicit load inserted before ins.
 */
14175 num_sregs = mono_inst_get_src_registers (ins, sregs);
14176 for (srcindex = 0; srcindex < 3; ++srcindex) {
14177 regtype = spec [MONO_INST_SRC1 + srcindex];
14178 sreg = sregs [srcindex];
14180 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14181 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14182 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14183 MonoInst *use_ins = ins;
14184 MonoInst *load_ins;
14185 guint32 load_opcode;
14187 if (var->opcode == OP_REGVAR) {
14188 sregs [srcindex] = var->dreg;
14189 //mono_inst_set_src_registers (ins, sregs);
14190 live_range_end [sreg] = use_ins;
14191 live_range_end_bb [sreg] = bb;
14193 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14196 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14197 /* var->dreg is a hreg */
14198 tmp->inst_c1 = sreg;
14199 mono_bblock_insert_after_ins (bb, ins, tmp);
14205 g_assert (var->opcode == OP_REGOFFSET);
14207 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14209 g_assert (load_opcode != OP_LOADV_MEMBASE);
14211 if (vreg_to_lvreg [sreg]) {
14212 g_assert (vreg_to_lvreg [sreg] != -1);
14214 /* The variable is already loaded to an lvreg */
14215 if (G_UNLIKELY (cfg->verbose_level > 2))
14216 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14217 sregs [srcindex] = vreg_to_lvreg [sreg];
14218 //mono_inst_set_src_registers (ins, sregs);
14222 /* Try to fuse the load into the instruction */
14223 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14224 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14225 sregs [0] = var->inst_basereg;
14226 //mono_inst_set_src_registers (ins, sregs);
14227 ins->inst_offset = var->inst_offset;
14228 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14229 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14230 sregs [1] = var->inst_basereg;
14231 //mono_inst_set_src_registers (ins, sregs);
14232 ins->inst_offset = var->inst_offset;
14234 if (MONO_IS_REAL_MOVE (ins)) {
14235 ins->opcode = OP_NOP;
14238 //printf ("%d ", srcindex); mono_print_ins (ins);
14240 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14242 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14243 if (var->dreg == prev_dreg) {
14245 * sreg refers to the value loaded by the load
14246 * emitted below, but we need to use ins->dreg
14247 * since it refers to the store emitted earlier.
14251 g_assert (sreg != -1);
14252 vreg_to_lvreg [var->dreg] = sreg;
14253 g_assert (lvregs_len < 1024);
14254 lvregs [lvregs_len ++] = var->dreg;
14258 sregs [srcindex] = sreg;
14259 //mono_inst_set_src_registers (ins, sregs);
14261 #if SIZEOF_REGISTER != 8
14262 if (regtype == 'l') {
/* 64-bit source on a 32-bit target: load both halves; MS word first so use_ins ends up as the earliest load. */
14263 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14264 mono_bblock_insert_before_ins (bb, ins, load_ins);
14265 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14266 mono_bblock_insert_before_ins (bb, ins, load_ins);
14267 use_ins = load_ins;
14272 #if SIZEOF_REGISTER == 4
14273 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14275 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14276 mono_bblock_insert_before_ins (bb, ins, load_ins);
14277 use_ins = load_ins;
/* Record the end of the variable's instruction-precise live range. */
14281 if (var->dreg < orig_next_vreg) {
14282 live_range_end [var->dreg] = use_ins;
14283 live_range_end_bb [var->dreg] = bb;
14286 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14289 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14290 tmp->inst_c1 = var->dreg;
14291 mono_bblock_insert_after_ins (bb, ins, tmp);
14295 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above: cache the result lvreg now that the sregs no longer need the old dreg. */
14297 if (dest_has_lvreg) {
14298 g_assert (ins->dreg != -1);
14299 vreg_to_lvreg [prev_dreg] = ins->dreg;
14300 g_assert (lvregs_len < 1024);
14301 lvregs [lvregs_len ++] = prev_dreg;
14302 dest_has_lvreg = FALSE;
14306 tmp_reg = ins->dreg;
14307 ins->dreg = ins->sreg2;
14308 ins->sreg2 = tmp_reg;
/* Invalidate the lvreg cache across calls. */
14311 if (MONO_IS_CALL (ins)) {
14312 /* Clear vreg_to_lvreg array */
14313 for (i = 0; i < lvregs_len; i++)
14314 vreg_to_lvreg [lvregs [i]] = 0;
14316 } else if (ins->opcode == OP_NOP) {
14318 MONO_INST_NULLIFY_SREGS (ins);
14321 if (cfg->verbose_level > 2)
14322 mono_print_ins_index (1, ins);
14325 /* Extend the live range based on the liveness info */
14326 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14327 for (i = 0; i < cfg->num_varinfo; i ++) {
14328 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14330 if (vreg_is_volatile (cfg, vi->vreg))
14331 /* The liveness info is incomplete */
14334 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14335 /* Live from at least the first ins of this bb */
14336 live_range_start [vi->vreg] = bb->code;
14337 live_range_start_bb [vi->vreg] = bb;
14340 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14341 /* Live at least until the last ins of this bb */
14342 live_range_end [vi->vreg] = bb->last_ins;
14343 live_range_end_bb [vi->vreg] = bb;
14349 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14351 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14352 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14354 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14355 for (i = 0; i < cfg->num_varinfo; ++i) {
14356 int vreg = MONO_VARINFO (cfg, i)->vreg;
14359 if (live_range_start [vreg]) {
14360 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14362 ins->inst_c1 = vreg;
14363 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14365 if (live_range_end [vreg]) {
14366 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14368 ins->inst_c1 = vreg;
14369 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14370 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14372 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14378 if (cfg->gsharedvt_locals_var_ins) {
14379 /* Nullify if unused */
14380 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14381 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14384 g_free (live_range_start);
14385 g_free (live_range_end);
14386 g_free (live_range_start_bb);
14387 g_free (live_range_end_bb);
14392 * - use 'iadd' instead of 'int_add'
14393 * - handling ovf opcodes: decompose in method_to_ir.
14394 * - unify iregs/fregs
14395 * -> partly done, the missing parts are:
14396 * - a more complete unification would involve unifying the hregs as well, so
14397 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14398 * would no longer map to the machine hregs, so the code generators would need to
14399 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14400 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14401 * fp/non-fp branches speeds it up by about 15%.
14402 * - use sext/zext opcodes instead of shifts
14404 * - get rid of TEMPLOADs if possible and use vregs instead
14405 * - clean up usage of OP_P/OP_ opcodes
14406 * - cleanup usage of DUMMY_USE
14407 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14409 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14410 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14411 * - make sure handle_stack_args () is called before the branch is emitted
14412 * - when the new IR is done, get rid of all unused stuff
14413 * - COMPARE/BEQ as separate instructions or unify them ?
14414 * - keeping them separate allows specialized compare instructions like
14415 * compare_imm, compare_membase
14416 * - most back ends unify fp compare+branch, fp compare+ceq
14417 * - integrate mono_save_args into inline_method
14418 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14419 * - handle long shift opts on 32 bit platforms somehow: they require
14420 * 3 sregs (2 for arg1 and 1 for arg2)
14421 * - make byref a 'normal' type.
14422 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14423 * variable if needed.
14424 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14425 * like inline_method.
14426 * - remove inlining restrictions
14427 * - fix LNEG and enable cfold of INEG
14428 * - generalize x86 optimizations like ldelema as a peephole optimization
14429 * - add store_mem_imm for amd64
14430 * - optimize the loading of the interruption flag in the managed->native wrappers
14431 * - avoid special handling of OP_NOP in passes
14432 * - move code inserting instructions into one function/macro.
14433 * - try a coalescing phase after liveness analysis
14434 * - add float -> vreg conversion + local optimizations on !x86
14435 * - figure out how to handle decomposed branches during optimizations, ie.
14436 * compare+branch, op_jump_table+op_br etc.
14437 * - promote RuntimeXHandles to vregs
14438 * - vtype cleanups:
14439 * - add a NEW_VARLOADA_VREG macro
14440 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14441 * accessing vtype fields.
14442 * - get rid of I8CONST on 64 bit platforms
14443 * - dealing with the increase in code size due to branches created during opcode
14445 * - use extended basic blocks
14446 * - all parts of the JIT
14447 * - handle_global_vregs () && local regalloc
14448 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14449 * - sources of increase in code size:
14452 * - isinst and castclass
14453 * - lvregs not allocated to global registers even if used multiple times
14454 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14456 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14457 * - add all micro optimizations from the old JIT
14458 * - put tree optimizations into the deadce pass
14459 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14460 * specific function.
14461 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14462 * fcompare + branchCC.
14463 * - create a helper function for allocating a stack slot, taking into account
14464 * MONO_CFG_HAS_SPILLUP.
14466 * - merge the ia64 switch changes.
14467 * - optimize mono_regstate2_alloc_int/float.
14468 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14469 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14470 * parts of the tree could be separated by other instructions, killing the tree
14471 * arguments, or stores killing loads etc. Also, should we fold loads into other
14472 * instructions if the result of the load is used multiple times ?
14473 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14474 * - LAST MERGE: 108395.
14475 * - when returning vtypes in registers, generate IR and append it to the end of the
14476 * last bb instead of doing it in the epilog.
14477 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14485 - When to decompose opcodes:
14486 - earlier: this makes some optimizations hard to implement, since the low level IR
14487 no longer contains the necessary information. But it is easier to do.
14488 - later: harder to implement, enables more optimizations.
14489 - Branches inside bblocks:
14490 - created when decomposing complex opcodes.
14491 - branches to another bblock: harmless, but not tracked by the branch
14492 optimizations, so need to branch to a label at the start of the bblock.
14493 - branches to inside the same bblock: very problematic, trips up the local
14494 reg allocator. Can be fixed by splitting the current bblock, but that is a
14495 complex operation, since some local vregs can become global vregs etc.
14496 - Local/global vregs:
14497 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14498 local register allocator.
14499 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14500 structure, created by mono_create_var (). Assigned to hregs or the stack by
14501 the global register allocator.
14502 - When to do optimizations like alu->alu_imm:
14503 - earlier -> saves work later on since the IR will be smaller/simpler
14504 - later -> can work on more instructions
14505 - Handling of valuetypes:
14506 - When a vtype is pushed on the stack, a new temporary is created, an
14507 instruction computing its address (LDADDR) is emitted and pushed on
14508 the stack. Need to optimize cases when the vtype is used immediately as in
14509 argument passing, stloc etc.
14510 - Instead of the to_end stuff in the old JIT, simply call the function handling
14511 the values on the stack before emitting the last instruction of the bb.
14514 #endif /* DISABLE_JIT */