2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, bblock, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, bblock, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, bblock, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (bblock, cmp); \
559 MONO_ADD_INS (bblock, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
597 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
666 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
667 return ((i + 1) << 8) | clause->flags;
674 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
676 MonoMethodHeader *header = cfg->header;
677 MonoExceptionClause *clause;
681 for (i = 0; i < header->num_clauses; ++i) {
682 clause = &header->clauses [i];
683 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
684 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
685 if (clause->flags == type)
686 res = g_list_append (res, clause);
693 mono_create_spvar_for_region (MonoCompile *cfg, int region)
697 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
701 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
702 /* prevent it from being register allocated */
703 var->flags |= MONO_INST_VOLATILE;
705 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
709 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
711 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
715 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
719 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
723 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
724 /* prevent it from being register allocated */
725 var->flags |= MONO_INST_VOLATILE;
727 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
733 * Returns the type used in the eval stack when @type is loaded.
734 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
737 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
741 type = mini_replace_type (type);
742 inst->klass = klass = mono_class_from_mono_type (type);
744 inst->type = STACK_MP;
749 switch (type->type) {
751 inst->type = STACK_INV;
755 case MONO_TYPE_BOOLEAN:
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
804 g_assert (cfg->generic_sharing_context);
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 inst->type = STACK_OBJ;
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map the evaluation-stack type of INS back to a representative MonoType*.
 * Aborts via g_error () for stack types with no direct mapping.
 */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref (this_arg) form of the instruction's class. */
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType* to its evaluation-stack type (STACK_I4, STACK_OBJ, ...).
 * Enum and generic-instance types are reduced to their underlying
 * representation first.
 */
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. so the switch sees the underlying type code. */
1289 t = mono_type_get_underlying_type (t);
1293 case MONO_TYPE_BOOLEAN:
1296 case MONO_TYPE_CHAR:
1303 case MONO_TYPE_FNPTR:
1305 case MONO_TYPE_CLASS:
1306 case MONO_TYPE_STRING:
1307 case MONO_TYPE_OBJECT:
1308 case MONO_TYPE_SZARRAY:
1309 case MONO_TYPE_ARRAY:
/* R4 maps to either STACK_R4 or STACK_R8 depending on cfg configuration. */
1315 return cfg->r4_stack_type;
1318 case MONO_TYPE_VALUETYPE:
1319 case MONO_TYPE_TYPEDBYREF:
1321 case MONO_TYPE_GENERICINST:
/* A generic instance over a valuetype behaves like a valuetype here. */
1322 if (mono_type_generic_inst_is_valuetype (t))
1328 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by a CIL ldelem/stelem opcode
 * (e.g. CEE_LDELEM_I4 -> int32_class). Aborts on unexpected opcodes.
 */
1335 array_access_to_klass (int opcode)
1339 return mono_defaults.byte_class;
1341 return mono_defaults.uint16_class;
1344 return mono_defaults.int_class;
1347 return mono_defaults.sbyte_class;
1350 return mono_defaults.int16_class;
1353 return mono_defaults.int32_class;
1355 return mono_defaults.uint32_class;
1358 return mono_defaults.int64_class;
1361 return mono_defaults.single_class;
1364 return mono_defaults.double_class;
1365 case CEE_LDELEM_REF:
1366 case CEE_STELEM_REF:
1367 return mono_defaults.object_class;
1369 g_assert_not_reached ();
1375 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable used to carry a stack value of SLOT across a
 * basic-block boundary. Variables are cached per (slot, stack-type) in
 * cfg->intvars so join points reuse the same local; slots beyond the
 * method's declared max_stack (possible due to inlining) always get a
 * fresh variable.
 */
1378 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1383 /* inlining can result in deeper stacks */
1384 if (slot >= cfg->header->max_stack)
1385 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per stack type per slot. */
1387 pos = ins->type - 1 + slot * STACK_MAX;
1389 switch (ins->type) {
1396 if ((vnum = cfg->intvars [pos]))
1397 return cfg->varinfo [vnum];
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1399 cfg->intvars [pos] = res->inst_c0;
1402 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Record the (image, token) pair that KEY was loaded from, so the AOT
 * compiler can later emit a token reference instead of a direct pointer.
 * Skipped when a generic context is active (token alone is ambiguous then)
 * or for wrapper-made references (table == 0).
 */
1408 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1411 * Don't use this if a generic_context is set, since that means AOT can't
1412 * look up the method using just the image+token.
1413 * table == 0 means this is a reference made from a wrapper.
1415 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1416 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1417 jump_info_token->image = image;
1418 jump_info_token->token = token;
1419 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1424 * This function is called to handle items that are left on the evaluation stack
1425 * at basic block boundaries. What happens is that we save the values to local variables
1426 * and we reload them later when first entering the target basic block (with the
1427 * handle_loaded_temps () function).
1428 * A single join point will use the same variables (stored in the array bb->out_stack or
1429 * bb->in_stack, if the basic block is before or after the join point).
1431 * This function needs to be called _before_ emitting the last instruction of
1432 * the bb (i.e. before emitting a branch).
1433 * If the stack merge fails at a join point, cfg->unverifiable is set.
1436 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1439 MonoBasicBlock *bb = cfg->cbb;
1440 MonoBasicBlock *outb;
1441 MonoInst *inst, **locals;
1446 if (cfg->verbose_level > 3)
1447 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bblock exits with values on the stack: pick (or create)
 * the set of locals that will carry them. */
1448 if (!bb->out_scount) {
1449 bb->out_scount = count;
1450 //printf ("bblock %d has out:", bb->block_num);
/* If a successor already has an in_stack, share it. */
1452 for (i = 0; i < bb->out_count; ++i) {
1453 outb = bb->out_bb [i];
1454 /* exception handlers are linked, but they should not be considered for stack args */
1455 if (outb->flags & BB_EXCEPTION_HANDLER)
1457 //printf (" %d", outb->block_num);
1458 if (outb->in_stack) {
1460 bb->out_stack = outb->in_stack;
1466 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1467 for (i = 0; i < count; ++i) {
1469 * try to reuse temps already allocated for this purpose, if they occupy the same
1470 * stack slot and if they are of the same type.
1471 * This won't cause conflicts since if 'local' is used to
1472 * store one of the values in the in_stack of a bblock, then
1473 * the same variable will be used for the same outgoing stack
1475 * This doesn't work when inlining methods, since the bblocks
1476 * in the inlined methods do not inherit their in_stack from
1477 * the bblock they are inlined to. See bug #58863 for an
1480 if (cfg->inlined_method)
1481 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1483 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into every successor's in_stack; a depth
 * mismatch at a join point makes the method unverifiable. */
1488 for (i = 0; i < bb->out_count; ++i) {
1489 outb = bb->out_bb [i];
1490 /* exception handlers are linked, but they should not be considered for stack args */
1491 if (outb->flags & BB_EXCEPTION_HANDLER)
1493 if (outb->in_scount) {
1494 if (outb->in_scount != bb->out_scount) {
1495 cfg->unverifiable = TRUE;
1498 continue; /* check they are the same locals */
1500 outb->in_scount = count;
1501 outb->in_stack = bb->out_stack;
1504 locals = bb->out_stack;
/* Spill each stack value to its local, and leave the local on the stack
 * in its place. */
1506 for (i = 0; i < count; ++i) {
1507 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1508 inst->cil_code = sp [i]->cil_code;
1509 sp [i] = locals [i];
1510 if (cfg->verbose_level > 3)
1511 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1515 * It is possible that the out bblocks already have in_stack assigned, and
1516 * the in_stacks differ. In this case, we will store to all the different
1523 /* Find a bblock which has a different in_stack */
1525 while (bindex < bb->out_count) {
1526 outb = bb->out_bb [bindex];
1527 /* exception handlers are linked, but they should not be considered for stack args */
1528 if (outb->flags & BB_EXCEPTION_HANDLER) {
1532 if (outb->in_stack != locals) {
1533 for (i = 0; i < count; ++i) {
1534 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1535 inst->cil_code = sp [i]->cil_code;
1536 sp [i] = locals [i];
1537 if (cfg->verbose_level > 3)
1538 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1540 locals = outb->in_stack;
1549 /* Emit code which loads interface_offsets [klass->interface_id]
1550 * The array is stored in memory before vtable.
/* Under AOT the interface id is not known at compile time, so it is loaded
 * via an ADJUSTED_IID patch and added to the vtable pointer; the JIT case
 * folds the (negative) offset into the load directly. */
1553 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1555 if (cfg->compile_aot) {
1556 int ioffset_reg = alloc_preg (cfg);
1557 int iid_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* interface_offsets [] lives immediately before the vtable in memory,
 * hence the negative offset. */
1564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id
 * set. Three strategies: a runtime icall when the bitmap is compressed,
 * explicit bit arithmetic under AOT (iid not known at compile time), and
 * a constant-folded byte load + mask otherwise.
 */
1569 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1571 int ibitmap_reg = alloc_preg (cfg);
1572 #ifdef COMPRESSED_INTERFACE_BITMAP
1574 MonoInst *res, *ins;
1575 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1576 MONO_ADD_INS (cfg->cbb, ins);
1578 if (cfg->compile_aot)
1579 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1581 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmap: defer the bit test to a runtime helper. */
1582 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1585 int ibitmap_byte_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1589 if (cfg->compile_aot) {
/* AOT: compute bitmap [iid >> 3] & (1 << (iid & 7)) entirely in IR,
 * since iid is only known at load time. */
1590 int iid_reg = alloc_preg (cfg);
1591 int shifted_iid_reg = alloc_preg (cfg);
1592 int ibitmap_byte_address_reg = alloc_preg (cfg);
1593 int masked_iid_reg = alloc_preg (cfg);
1594 int iid_one_bit_reg = alloc_preg (cfg);
1595 int iid_bit_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1601 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1602 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant, so byte index and mask fold. */
1605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1612 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1613 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: test the bitmap embedded in MonoClass. */
1616 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1618 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1622 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1623 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: test the bitmap embedded in MonoVTable. */
1626 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1628 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1632 * Emit code which checks whenever the interface id of @klass is smaller
1633 * than the value given by max_iid_reg.
/* On failure either branches to FALSE_TARGET (when given) or throws
 * InvalidCastException. */
1636 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1637 MonoBasicBlock *false_target)
1639 if (cfg->compile_aot) {
/* AOT: interface id comes in through an IID patch. */
1640 int iid_reg = alloc_preg (cfg);
1641 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1649 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1652 /* Same as above, but obtains max_iid from a vtable */
1654 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1655 MonoBasicBlock *false_target)
1657 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1660 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1663 /* Same as above, but obtains max_iid from a klass */
1665 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1666 MonoBasicBlock *false_target)
1668 int max_iid_reg = alloc_preg (cfg);
/* Same check as the vtable variant, but reads MonoClass.max_interface_id. */
1670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1671 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test: walk the supertypes array of the
 * class in KLASS_REG and compare the entry at KLASS's inheritance depth
 * against KLASS (given either as an IR value in KLASS_INS, an AOT class
 * constant, or an immediate). Branches to TRUE_TARGET on match and to
 * FALSE_TARGET when the depth check already rules the cast out.
 */
1675 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1677 int idepth_reg = alloc_preg (cfg);
1678 int stypes_reg = alloc_preg (cfg);
1679 int stype = alloc_preg (cfg);
1681 mono_class_setup_supertypes (klass);
/* Deep hierarchies: the supertypes entry only exists if the candidate's
 * idepth is at least klass->idepth, so check that first. */
1683 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1692 } else if (cfg->compile_aot) {
1693 int const_reg = alloc_preg (cfg);
1694 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1695 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with KLASS known at compile time only. */
1703 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1705 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in VTABLE_REG:
 * first the max-iid range check, then the interface bitmap test.
 * Branches to TRUE_TARGET on success when given, otherwise throws
 * InvalidCastException on failure.
 */
1709 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1711 int intf_reg = alloc_preg (cfg);
1713 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1714 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1723 * Variant of the above that takes a register to the class, not the vtable.
1726 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1728 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but indexing off MonoClass. */
1730 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1731 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1736 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact-class equality check of KLASS_REG against KLASS (as an
 * IR value, an AOT class constant, or an immediate), throwing
 * InvalidCastException on mismatch.
 */
1740 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1743 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1744 } else if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with KLASS known at compile time. */
1755 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1757 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (instead of throwing, like mini_emit_class_check does).
 */
1761 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1763 if (cfg->compile_aot) {
1764 int const_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1774 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Array classes are handled by
 * matching rank and then recursing on the element (cast) class; other
 * classes go through the supertypes-array walk. OBJ_REG may be -1 to
 * skip the SZARRAY bounds check (used when recursing for arrays of
 * arrays).
 */
1777 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1780 int rank_reg = alloc_preg (cfg);
1781 int eclass_reg = alloc_preg (cfg);
1783 g_assert (!klass_inst);
/* Array case: ranks must match exactly. */
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1787 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes whose cast compatibility rules involve
 * enums (see ECMA-335 array covariance with underlying enum types). */
1789 if (klass->cast_class == mono_defaults.object_class) {
1790 int parent_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1792 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1795 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 } else if (klass->cast_class == mono_defaults.enum_class) {
1798 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1799 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1800 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1802 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1803 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1806 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1807 /* Check that the object is a vector too */
/* SZARRAY means no bounds array: MonoArray.bounds must be NULL. */
1808 int bounds_reg = alloc_preg (cfg);
1809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1811 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertypes walk, throwing instead of branching. */
1814 int idepth_reg = alloc_preg (cfg);
1815 int stypes_reg = alloc_preg (cfg);
1816 int stype = alloc_preg (cfg);
1818 mono_class_setup_supertypes (klass);
1820 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1823 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1827 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with KLASS known at compile time only. */
1832 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1834 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that sets SIZE bytes at DESTREG + OFFSET to VAL
 * (currently only VAL == 0 is supported, see the assert). Uses a single
 * immediate store when the region fits a register and is aligned,
 * otherwise emits a sequence of register stores from widest to narrowest,
 * respecting alignment/NO_UNALIGNED_ACCESS constraints.
 */
1838 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1842 g_assert (val == 0);
1847 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1856 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1858 #if SIZEOF_REGISTER == 8
1860 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store it out. */
1866 val_reg = alloc_preg (cfg);
1868 if (SIZEOF_REGISTER == 8)
1869 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1871 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1874 /* This could be optimized further if necessary */
/* Unaligned region: fall back to byte stores. */
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1883 #if !NO_UNALIGNED_ACCESS
1884 if (SIZEOF_REGISTER == 8) {
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET. Copies in the widest chunks the alignment (and
 * NO_UNALIGNED_ACCESS) allows, from 8-byte down to single bytes.
 * SIZE is asserted bounded to avoid unbounded code expansion.
 */
1916 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1923 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1924 g_assert (size < 10000);
1927 /* This could be optimized further if necessary */
/* Unaligned region: fall back to byte-by-byte copy. */
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1938 #if !NO_UNALIGNED_ACCESS
1939 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks while at least 8 bytes remain. */
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1952 cur_reg = alloc_preg (cfg);
1953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1960 cur_reg = alloc_preg (cfg);
1961 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1968 cur_reg = alloc_preg (cfg);
1969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 * Under AOT the slot offset is loaded through a TLS-offset constant and
 * the register form of the opcode is used; the JIT path bakes the offset
 * into the instruction.
 */
1978 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1982 if (cfg->compile_aot) {
1983 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1984 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1986 ins->sreg2 = c->dreg;
1987 MONO_ADD_INS (cfg->cbb, ins);
1989 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1991 ins->inst_offset = mini_get_tls_offset (tls_key);
1992 MONO_ADD_INS (cfg->cbb, ins);
1999 * Emit IR to push the current LMF onto the LMF stack.
2002 emit_push_lmf (MonoCompile *cfg)
2005 * Emit IR to push the LMF:
2006 * lmf_addr = <lmf_addr from tls>
2007 * lmf->lmf_addr = lmf_addr
2008 * lmf->prev_lmf = *lmf_addr
2011 int lmf_reg, prev_lmf_reg;
2012 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link the method's LMF in by
 * saving the current TLS LMF as previous_lmf and storing ours back. */
2017 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2018 /* Load current lmf */
2019 lmf_ins = mono_get_lmf_intrinsic (cfg);
2021 MONO_ADD_INS (cfg->cbb, lmf_ins);
2022 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2023 lmf_reg = ins->dreg;
2024 /* Save previous_lmf */
2025 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2027 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2030 * Store lmf_addr in a variable, so it can be allocated to a global register.
2032 if (!cfg->lmf_addr_var)
2033 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One of several ways to obtain lmf_addr follows, depending on platform
 * support: a jit_tls intrinsic, an lmf_addr intrinsic, an inlined
 * pthread_getspecific () call, or a plain icall to mono_get_lmf_addr. */
2036 ins = mono_get_jit_tls_intrinsic (cfg);
2038 int jit_tls_dreg = ins->dreg;
2040 MONO_ADD_INS (cfg->cbb, ins);
2041 lmf_reg = alloc_preg (cfg);
2042 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2049 MONO_ADD_INS (cfg->cbb, lmf_ins);
2052 MonoInst *args [16], *jit_tls_ins, *ins;
2054 /* Inline mono_get_lmf_addr () */
2055 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2057 /* Load mono_jit_tls_id */
2058 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2059 /* call pthread_getspecific () */
2060 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2061 /* lmf_addr = &jit_tls->lmf */
2062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2065 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2069 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2071 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2072 lmf_reg = ins->dreg;
2074 prev_lmf_reg = alloc_preg (cfg);
2075 /* Save previous_lmf */
2076 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link our LMF in: *lmf_addr = lmf */
2079 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2086 * Emit IR to pop the current LMF from the LMF stack.
2089 emit_pop_lmf (MonoCompile *cfg)
2091 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2097 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2098 lmf_reg = ins->dreg;
/* TLS-resident LMF: restore previous_lmf straight back into TLS. */
2100 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2101 /* Load previous_lmf */
2102 prev_lmf_reg = alloc_preg (cfg);
2103 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2105 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2108 * Emit IR to pop the LMF:
2109 * *(lmf->lmf_addr) = lmf->prev_lmf
2111 /* This could be called before emit_push_lmf () */
2112 if (!cfg->lmf_addr_var)
2113 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2114 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2116 prev_lmf_reg = alloc_preg (cfg);
2117 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2118 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 *   Emit a call to the profiler enter/leave hook FUNC with the current
 * method as argument, when ENTER_LEAVE profiling is enabled. Skipped for
 * inlined methods to keep profiling results accurate.
 */
2123 emit_instrumentation_call (MonoCompile *cfg, void *func)
2125 MonoInst *iargs [1];
2128 * Avoid instrumenting inlined methods since it can
2129 * distort profiling results.
2131 if (cfg->method != cfg->current_method)
2134 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2135 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2136 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Select the IR call opcode for a call returning TYPE: the CALLI/VIRT
 * flags pick the _REG/_MEMBASE variant, and the return type picks the
 * family (OP_CALL/OP_LCALL/OP_FCALL/OP_RCALL/OP_VCALL/OP_VOIDCALL).
 * Enums and generic instances are reduced to their underlying type first.
 */
2141 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2147 type = mini_get_basic_type_from_generic (gsctx, type);
2148 type = mini_replace_type (type);
2149 switch (type->type) {
2150 case MONO_TYPE_VOID:
2151 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2154 case MONO_TYPE_BOOLEAN:
2157 case MONO_TYPE_CHAR:
2160 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2164 case MONO_TYPE_FNPTR:
2165 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2166 case MONO_TYPE_CLASS:
2167 case MONO_TYPE_STRING:
2168 case MONO_TYPE_OBJECT:
2169 case MONO_TYPE_SZARRAY:
2170 case MONO_TYPE_ARRAY:
2171 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2174 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2177 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2179 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2181 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2182 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying integral type. */
2183 if (type->data.klass->enumtype) {
2184 type = mono_class_enum_basetype (type->data.klass);
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2188 case MONO_TYPE_TYPEDBYREF:
2189 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2190 case MONO_TYPE_GENERICINST:
2191 type = &type->data.generic_class->container_class->byval_arg;
2194 case MONO_TYPE_MVAR:
2196 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2198 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2204 * target_type_is_incompatible:
2205 * @cfg: MonoCompile context
2207 * Check that the item @arg on the evaluation stack can be stored
2208 * in the target type (can be a local, or field, etc).
2209 * The cfg arg can be used to check if we need verification or just
2212 * Returns: non-0 value if arg can't be stored on a target.
2215 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2217 MonoType *simple_type;
2220 target = mini_replace_type (target);
2221 if (target->byref) {
2222 /* FIXME: check that the pointed to types match */
2223 if (arg->type == STACK_MP)
2224 return arg->klass != mono_class_from_mono_type (target);
2225 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-stripped) target type and check the
 * stack type of ARG against the allowed set for each case. */
2230 simple_type = mono_type_get_underlying_type (target);
2231 switch (simple_type->type) {
2232 case MONO_TYPE_VOID:
2236 case MONO_TYPE_BOOLEAN:
2239 case MONO_TYPE_CHAR:
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2246 /* STACK_MP is needed when setting pinned locals */
2247 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2252 case MONO_TYPE_FNPTR:
2254 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2255 * in native int. (#688008).
2257 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2260 case MONO_TYPE_CLASS:
2261 case MONO_TYPE_STRING:
2262 case MONO_TYPE_OBJECT:
2263 case MONO_TYPE_SZARRAY:
2264 case MONO_TYPE_ARRAY:
2265 if (arg->type != STACK_OBJ)
2267 /* FIXME: check type compatibility */
2271 if (arg->type != STACK_I8)
2275 if (arg->type != cfg->r4_stack_type)
2279 if (arg->type != STACK_R8)
2282 case MONO_TYPE_VALUETYPE:
/* Valuetypes must match exactly, not just be valuetypes. */
2283 if (arg->type != STACK_VTYPE)
2285 klass = mono_class_from_mono_type (simple_type);
2286 if (klass != arg->klass)
2289 case MONO_TYPE_TYPEDBYREF:
2290 if (arg->type != STACK_VTYPE)
2292 klass = mono_class_from_mono_type (simple_type);
2293 if (klass != arg->klass)
2296 case MONO_TYPE_GENERICINST:
2297 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2298 if (arg->type != STACK_VTYPE)
2300 klass = mono_class_from_mono_type (simple_type);
2301 if (klass != arg->klass)
2305 if (arg->type != STACK_OBJ)
2307 /* FIXME: check type compatibility */
2311 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing; whether they
 * behave as valuetypes depends on the constraint info. */
2312 g_assert (cfg->generic_sharing_context);
2313 if (mini_type_var_is_vt (cfg, simple_type)) {
2314 if (arg->type != STACK_VTYPE)
2317 if (arg->type != STACK_OBJ)
2322 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2328 * Prepare arguments for passing to a function call.
2329 * Return a non-zero value if the arguments can't be passed to the given
2331 * The type checks are not yet complete and some conversions may need
2332 * casts on 32 or 64 bit architectures.
2334 * FIXME: implement this using target_type_is_incompatible ()
2337 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2339 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer. */
2343 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2347 for (i = 0; i < sig->param_count; ++i) {
2348 if (sig->params [i]->byref) {
2349 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Strip generic sharing to get the basic parameter type, then check
 * each argument's stack type against it. */
2353 simple_type = sig->params [i];
2354 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2356 switch (simple_type->type) {
2357 case MONO_TYPE_VOID:
2362 case MONO_TYPE_BOOLEAN:
2365 case MONO_TYPE_CHAR:
2368 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2374 case MONO_TYPE_FNPTR:
2375 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2378 case MONO_TYPE_CLASS:
2379 case MONO_TYPE_STRING:
2380 case MONO_TYPE_OBJECT:
2381 case MONO_TYPE_SZARRAY:
2382 case MONO_TYPE_ARRAY:
2383 if (args [i]->type != STACK_OBJ)
2388 if (args [i]->type != STACK_I8)
2392 if (args [i]->type != cfg->r4_stack_type)
2396 if (args [i]->type != STACK_R8)
2399 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
2400 if (simple_type->data.klass->enumtype) {
2401 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2404 if (args [i]->type != STACK_VTYPE)
2407 case MONO_TYPE_TYPEDBYREF:
2408 if (args [i]->type != STACK_VTYPE)
2411 case MONO_TYPE_GENERICINST:
2412 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2415 case MONO_TYPE_MVAR:
2417 if (args [i]->type != STACK_VTYPE)
2421 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart;
 * aborts on anything else.
 */
2429 callvirt_to_call (int opcode)
2432 case OP_CALL_MEMBASE:
2434 case OP_VOIDCALL_MEMBASE:
2436 case OP_FCALL_MEMBASE:
2438 case OP_RCALL_MEMBASE:
2440 case OP_VCALL_MEMBASE:
2442 case OP_LCALL_MEMBASE:
2445 g_assert_not_reached ();
2451 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Materialize the IMT argument (either the IMT_ARG value or a constant
 * for METHOD) into a register and attach it to CALL. LLVM and JIT paths
 * differ in how the register is registered with the call; architectures
 * without MONO_ARCH_IMT_REG fall back to
 * mono_arch_emit_imt_argument ().
 */
2453 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2457 if (COMPILE_LLVM (cfg)) {
2458 method_reg = alloc_preg (cfg);
2461 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2462 } else if (cfg->compile_aot) {
2463 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2466 MONO_INST_NEW (cfg, ins, OP_PCONST);
2467 ins->inst_p0 = method;
2468 ins->dreg = method_reg;
2469 MONO_ADD_INS (cfg->cbb, ins);
2473 call->imt_arg_reg = method_reg;
2475 #ifdef MONO_ARCH_IMT_REG
2476 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2478 /* Need this to keep the IMT arg alive */
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three ways to obtain the value, then hand it to
 * the architecture's IMT register (or its custom hook). */
2484 #ifdef MONO_ARCH_IMT_REG
2485 method_reg = alloc_preg (cfg);
2488 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2489 } else if (cfg->compile_aot) {
2490 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2493 MONO_INST_NEW (cfg, ins, OP_PCONST);
2494 ins->inst_p0 = method;
2495 ins->dreg = method_reg;
2496 MONO_ADD_INS (cfg->cbb, ins);
2499 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2501 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch descriptor from MP and initialize it with
 * IP, TYPE and TARGET.  Lifetime is tied to the mempool; no free needed.
 * NOTE(review): the assignments of ji->ip and ji->type and the return
 * statement are elided in this listing.
 */
2505 static MonoJumpInfo *
2506 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2508 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2512 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Return the generic-context usage flags for KLASS when compiling under
 * generic sharing; presumably returns 0 otherwise (the fallback return
 * is elided in this listing).
 */
2518 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2520 if (cfg->generic_sharing_context)
2521 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Method-level analogue of mini_class_check_context_used (): report how
 * METHOD uses its generic context when sharing is active; presumably
 * returns 0 otherwise (fallback return elided in this listing).
 */
2527 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2529 if (cfg->generic_sharing_context)
2530 return mono_method_check_context_used (method);
2536 * check_method_sharing:
2538 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are written through the optional OUT_PASS_VTABLE /
 * OUT_PASS_MRGCTX pointers; either may be NULL.  At most one of the two
 * flags is set (see the g_assert below).
 */
2541 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2543 gboolean pass_vtable = FALSE;
2544 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable as
 * an implicit argument when the method can be shared. */
2546 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2547 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2548 gboolean sharable = FALSE;
2550 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2553 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2554 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2555 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2557 sharable = sharing_enabled && context_sharable;
2561 * Pass vtable iff target method might
2562 * be shared, which means that sharing
2563 * is enabled for its class and its
2564 * context is sharable (and it's not a
2567 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) instead need an mrgctx. */
2571 if (mini_method_get_context (cmethod) &&
2572 mini_method_get_context (cmethod)->method_inst) {
2573 g_assert (!pass_vtable);
2575 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2578 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2579 MonoGenericContext *context = mini_method_get_context (cmethod);
2580 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2582 if (sharing_enabled && context_sharable)
/* gsharedvt-compiled callers always pass the mrgctx for gsharedvt
 * signatures. */
2584 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2589 if (out_pass_vtable)
2590 *out_pass_vtable = pass_vtable;
2591 if (out_pass_mrgctx)
2592 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build (but do not add to a basic block) a MonoCallInst for a call with
 * signature SIG and arguments ARGS.  CALLI selects indirect calls,
 * VIRTUAL selects *CALL_MEMBASE opcodes, TAIL emits OP_TAILCALL, RGCTX
 * notes that an rgctx argument will be attached, and UNBOX_TRAMPOLINE
 * requests an unbox trampoline for valuetype 'this' calls.  The caller
 * is responsible for MONO_ADD_INS-ing the returned call.
 */
2595 inline static MonoCallInst *
2596 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2597 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2601 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2606 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2608 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2610 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2613 call->signature = sig;
2614 call->rgctx_reg = rgctx;
2615 sig_ret = mini_replace_type (sig->ret);
2617 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns: route the result through cfg->vret_addr or a fresh
 * temporary whose address is passed via OP_OUTARG_VTRETADDR. */
2620 if (mini_type_is_vtype (cfg, sig_ret)) {
2621 call->vret_var = cfg->vret_addr;
2622 //g_assert_not_reached ();
2624 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2625 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2628 temp->backend.is_pinvoke = sig->pinvoke;
2631 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2632 * address of return value to increase optimization opportunities.
2633 * Before vtype decomposition, the dreg of the call ins itself represents the
2634 * fact the call modifies the return value. After decomposition, the call will
2635 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2636 * will be transformed into an LDADDR.
2638 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2639 loada->dreg = alloc_preg (cfg);
2640 loada->inst_p0 = temp;
2641 /* We reference the call too since call->dreg could change during optimization */
2642 loada->inst_p1 = call;
2643 MONO_ADD_INS (cfg->cbb, loada);
2645 call->inst.dreg = temp->dreg;
2647 call->vret_var = loada;
2648 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2649 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2651 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2652 if (COMPILE_SOFT_FLOAT (cfg)) {
2654 * If the call has a float argument, we would need to do an r8->r4 conversion using
2655 * an icall, but that cannot be done during the call sequence since it would clobber
2656 * the call registers + the stack. So we do it before emitting the call.
2658 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2660 MonoInst *in = call->args [i];
/* args [0] is the implicit 'this' (pointer-sized) when sig->hasthis. */
2662 if (i >= sig->hasthis)
2663 t = sig->params [i - sig->hasthis];
2665 t = &mono_defaults.int_class->byval_arg;
2666 t = mono_type_get_underlying_type (t);
2668 if (!t->byref && t->type == MONO_TYPE_R4) {
2669 MonoInst *iargs [1];
2673 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2675 /* The result will be in an int vreg */
2676 call->args [i] = conv;
2682 call->need_unbox_trampoline = unbox_trampoline;
/* Let the active backend lower the outgoing arguments. */
2685 if (COMPILE_LLVM (cfg))
2686 mono_llvm_emit_call (cfg, call);
2688 mono_arch_emit_call (cfg, call);
2690 mono_arch_emit_call (cfg, call);
2693 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2694 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument in RGCTX_REG to CALL.  On architectures with
 * a dedicated RGCTX register it is passed there; otherwise only the vreg
 * is recorded in call->rgctx_arg_reg for later lowering.
 */
2700 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2702 #ifdef MONO_ARCH_RGCTX_REG
2703 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2704 cfg->uses_rgctx_reg = TRUE;
2705 call->rgctx_reg = TRUE;
2707 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG and arguments
 * ARGS, optionally carrying an IMT argument and/or an rgctx argument.
 * For pinvoke wrappers compiled with check_pinvoke_callconv, also emits
 * a stack-pointer check around the call and throws
 * ExecutionEngineException on an imbalance (wrong calling convention).
 * Returns the call instruction as a MonoInst*.
 */
2714 inline static MonoInst*
2715 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2720 gboolean check_sp = FALSE;
2722 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2723 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2725 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value out of its vreg so it survives until after the
 * outgoing arguments are emitted. */
2730 rgctx_reg = mono_alloc_preg (cfg);
2731 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record SP before the call so it can be compared afterwards. */
2735 if (!cfg->stack_inbalance_var)
2736 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2738 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2739 ins->dreg = cfg->stack_inbalance_var->dreg;
2740 MONO_ADD_INS (cfg->cbb, ins);
2743 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2745 call->inst.sreg1 = addr->dreg;
2748 emit_imt_argument (cfg, call, NULL, imt_arg);
2750 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2755 sp_reg = mono_alloc_preg (cfg);
2757 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2759 MONO_ADD_INS (cfg->cbb, ins);
2761 /* Restore the stack so we don't crash when throwing the exception */
2762 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2763 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2764 MONO_ADD_INS (cfg->cbb, ins);
2766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2767 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2771 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2773 return (MonoInst*)call;
2777 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2780 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2782 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Central helper for emitting a managed call to METHOD with signature
 * SIG and arguments ARGS.  Handles direct calls, devirtualization of
 * final/non-virtual methods, vtable and IMT-based virtual dispatch,
 * delegate Invoke fast paths, remoting wrappers, string ctors, tail
 * calls, and an optional imt/rgctx argument.  THIS being non-NULL marks
 * the call as virtual.  Returns the call instruction.
 */
2785 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2786 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2788 #ifndef DISABLE_REMOTING
2789 gboolean might_be_remote = FALSE;
2791 gboolean virtual = this != NULL;
2792 gboolean enable_for_aot = TRUE;
2796 gboolean need_unbox_trampoline;
2799 sig = mono_method_signature (method);
2802 rgctx_reg = mono_alloc_preg (cfg);
2803 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2806 if (method->string_ctor) {
2807 /* Create the real signature */
2808 /* FIXME: Cache these */
2809 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2810 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2815 context_used = mini_method_check_context_used (cfg, method);
2817 #ifndef DISABLE_REMOTING
/* Calls on MarshalByRef (or object) receivers may cross a remoting
 * boundary; under generic sharing this must go through a calli to the
 * remoting-invoke-with-check stub fetched from the rgctx. */
2818 might_be_remote = this && sig->hasthis &&
2819 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2820 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2822 if (might_be_remote && context_used) {
2825 g_assert (cfg->generic_sharing_context);
2827 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2829 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2833 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2835 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2837 #ifndef DISABLE_REMOTING
2838 if (might_be_remote)
2839 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2842 call->method = method;
2843 call->inst.flags |= MONO_INST_HAS_METHOD;
2844 call->inst.inst_left = this;
2845 call->tail_call = tail;
2848 int vtable_reg, slot_reg, this_reg;
2851 this_reg = this->dreg;
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2853 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2854 MonoInst *dummy_use;
2856 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2858 /* Make a call to delegate->invoke_impl */
2859 call->inst.inst_basereg = this_reg;
2860 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2861 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2863 /* We must emit a dummy use here because the delegate trampoline will
2864 replace the 'this' argument with the delegate target making this activation
2865 no longer a root for the delegate.
2866 This is an issue for delegates that target collectible code such as dynamic
2867 methods of GC'able assemblies.
2869 For a test case look into #667921.
2871 FIXME: a dummy use is not the best way to do it as the local register allocator
2872 will put it on a caller save register and spil it around the call.
2873 Ideally, we would either put it on a callee save register or only do the store part.
2875 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2877 return (MonoInst*)call;
/* Devirtualize: non-virtual or final methods only need a null check of
 * 'this' before calling directly. */
2880 if ((!cfg->compile_aot || enable_for_aot) &&
2881 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2882 (MONO_METHOD_IS_FINAL (method) &&
2883 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2884 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2886 * the method is not virtual, we just need to ensure this is not null
2887 * and then we can call the method directly.
2889 #ifndef DISABLE_REMOTING
2890 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2892 * The check above ensures method is not gshared, this is needed since
2893 * gshared methods can't have wrappers.
2895 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2899 if (!method->string_ctor)
2900 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2902 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2903 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2905 * the method is virtual, but we can statically dispatch since either
2906 * it's class or the method itself are sealed.
2907 * But first we need to ensure it's not a null reference.
2909 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2911 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on NULL 'this'),
 * then pick the slot — negative IMT slot for interfaces, positive
 * vtable slot otherwise. */
2913 vtable_reg = alloc_preg (cfg);
2914 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2915 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2918 guint32 imt_slot = mono_method_get_imt_slot (method);
2919 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 slot_reg = vtable_reg;
2921 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2923 if (slot_reg == -1) {
2924 slot_reg = alloc_preg (cfg);
2925 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2926 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2929 slot_reg = vtable_reg;
2930 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2931 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* Generic virtual methods also dispatch through the IMT machinery. */
2933 g_assert (mono_method_signature (method)->generic_param_count);
2934 emit_imt_argument (cfg, call, call->method, imt_arg);
2938 call->inst.sreg1 = slot_reg;
2939 call->inst.inst_offset = offset;
2940 call->virtual = TRUE;
2944 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2947 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2949 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: emit a plain (non-tail, no imt/rgctx arg) call to
 * METHOD using its own signature.
 */
2953 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2955 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the assignment of FUNC into the call (fptr) is elided in
 * this listing.
 */
2959 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2966 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2969 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2971 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the registered JIT icall identified by FUNC, going
 * through its managed wrapper obtained from mono_icall_get_wrapper ().
 */
2975 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2977 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2981 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2985 * mono_emit_abs_call:
2987 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is used as the "address"; the ABS patch
 * resolver looks it up in cfg->abs_patches at patch time. */
2989 inline static MonoInst*
2990 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2991 MonoMethodSignature *sig, MonoInst **args)
2993 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2997 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
3000 if (cfg->abs_patches == NULL)
3001 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3002 g_hash_table_insert (cfg->abs_patches, ji, ji);
3003 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so the backend does not treat it as a real
 * code address. */
3004 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the JIT icall INFO.  When the icall cannot raise
 * (info->no_raise) and debugging constraints allow it, the icall's
 * wrapper is inlined instead of called, avoiding wrapper overhead.
 * OUT_CBB receives the basic block following the inlined code.
 */
3009 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3011 gboolean no_wrapper = FALSE;
3014 * Call the jit icall without a wrapper if possible.
3015 * The wrapper is needed for the following reasons:
3016 * - to handle exceptions thrown using mono_raise_exceptions () from the
3017 * icall function. The EH code needs the lmf frame pushed by the
3018 * wrapper to be able to unwind back to managed code.
3019 * - to be able to do stack walks for asynchronously suspended
3020 * threads when debugging.
3022 if (info->no_raise) {
3023 if (cfg->compile_aot) {
3024 // FIXME: This might be loaded into a runtime during debugging
3025 // even if it is not compiled using 'soft-debug'.
3027 if (!cfg->gen_seq_points_debug_data)
/* Lazily create and publish the icall wrapper; the barrier orders the
 * wrapper construction before other threads observe the pointer. */
3036 if (!info->wrapper_method) {
3037 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3038 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3040 mono_memory_barrier ();
3044 * Inline the wrapper method, which is basically a call to the C icall, and
3045 * an exception check.
3047 costs = inline_method (cfg, info->wrapper_method, NULL,
3048 args, NULL, cfg->real_offset, TRUE, out_cbb);
3049 g_assert (costs > 0);
3050 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: emit a real call through the wrapper. */
3054 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend a small-integer call result when the callee (pinvoke,
 * or any call under LLVM) may leave the upper bits of the return
 * register uninitialized.  Presumably returns the (possibly replaced)
 * result instruction; the return statements are elided in this listing.
 */
3059 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3061 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3062 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3066 * Native code might return non register sized integers
3067 * without initializing the upper bits.
/* Choose the widening conversion from the load opcode the return type
 * would use; types wider than 2 bytes need no widening. */
3069 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3070 case OP_LOADI1_MEMBASE:
3071 widen_op = OP_ICONV_TO_I1;
3073 case OP_LOADU1_MEMBASE:
3074 widen_op = OP_ICONV_TO_U1;
3076 case OP_LOADI2_MEMBASE:
3077 widen_op = OP_ICONV_TO_I2;
3079 case OP_LOADU2_MEMBASE:
3080 widen_op = OP_ICONV_TO_U2;
3086 if (widen_op != -1) {
3087 int dreg = alloc_preg (cfg);
3090 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3091 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and lazily cache) the managed String.memcpy(dest,src,n) helper
 * from corlib; aborts if the running corlib does not provide it.
 */
3101 get_memcpy_method (void)
3103 static MonoMethod *memcpy_method = NULL;
3104 if (!memcpy_method) {
3105 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3107 g_error ("Old corlib found. Install a new one");
3109 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively walk the instance fields of KLASS and set one bit in
 * WB_BITMAP per pointer-sized slot (relative to OFFSET) that holds a
 * managed reference, so the caller can emit targeted write barriers.
 */
3113 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3115 MonoClassField *field;
3116 gpointer iter = NULL;
3118 while ((field = mono_class_get_fields (klass, &iter))) {
3121 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetypes store fields without the MonoObject header. */
3123 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3124 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned. */
3125 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3126 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3128 MonoClass *field_class = mono_class_from_mono_type (field->type);
3129 if (field_class->has_references)
3130 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR.  Picks, in
 * order of preference: the arch-specific card-table barrier opcode, an
 * inline card-table mark, or a call to the generic write-barrier method.
 * No-op when the GC does not need barriers.
 */
3136 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3138 int card_table_shift_bits;
3139 gpointer card_table_mask;
3141 MonoInst *dummy_use;
3142 int nursery_shift_bits;
3143 size_t nursery_size;
3144 gboolean has_card_table_wb = FALSE;
3146 if (!cfg->gen_write_barriers)
3149 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3151 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3153 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3154 has_card_table_wb = TRUE;
3157 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3160 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3161 wbarrier->sreg1 = ptr->dreg;
3162 wbarrier->sreg2 = value->dreg;
3163 MONO_ADD_INS (cfg->cbb, wbarrier);
3164 } else if (card_table) {
/* Inline card marking: card = card_table + (ptr >> shift), *card = 1. */
3165 int offset_reg = alloc_preg (cfg);
3166 int card_reg = alloc_preg (cfg);
3169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3170 if (card_table_mask)
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3173 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3174 * IMM's larger than 32bits.
3176 if (cfg->compile_aot) {
3177 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3179 MONO_INST_NEW (cfg, ins, OP_PCONST);
3180 ins->inst_p0 = card_table;
3181 ins->dreg = card_reg;
3182 MONO_ADD_INS (cfg->cbb, ins);
3185 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3186 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3188 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3189 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3192 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit an inline, write-barrier-aware copy of a valuetype of
 * class KLASS (SIZE bytes, alignment ALIGN); iargs[0]/iargs[1] hold the
 * destination and source addresses.  Large copies are routed to the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 * Presumably returns whether it handled the copy (TRUE/FALSE returns are
 * elided in this listing).
 */
3196 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3198 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3199 unsigned need_wb = 0;
3204 /*types with references can't have alignment smaller than sizeof(void*) */
3205 if (align < SIZEOF_VOID_P)
3208 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3209 if (size > 32 * SIZEOF_VOID_P)
3212 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3214 /* We don't unroll more than 5 stores to avoid code bloat. */
3215 if (size > 5 * SIZEOF_VOID_P) {
3216 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3217 size += (SIZEOF_VOID_P - 1);
3218 size &= ~(SIZEOF_VOID_P - 1);
3220 EMIT_NEW_ICONST (cfg, iargs [2], size);
3221 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3222 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3226 destreg = iargs [0]->dreg;
3227 srcreg = iargs [1]->dreg;
3230 dest_ptr_reg = alloc_preg (cfg);
3231 tmp_reg = alloc_preg (cfg);
3234 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; a write barrier is emitted for each
 * slot flagged in the need_wb bitmap (check elided in this listing). */
3236 while (size >= SIZEOF_VOID_P) {
3237 MonoInst *load_inst;
3238 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3239 load_inst->dreg = tmp_reg;
3240 load_inst->inst_basereg = srcreg;
3241 load_inst->inst_offset = offset;
3242 MONO_ADD_INS (cfg->cbb, load_inst);
3244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3247 emit_write_barrier (cfg, iargs [0], load_inst);
3249 offset += SIZEOF_VOID_P;
3250 size -= SIZEOF_VOID_P;
3253 /*tmp += sizeof (void*)*/
3254 if (size >= SIZEOF_VOID_P) {
3255 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3256 MONO_ADD_INS (cfg->cbb, iargs [0]);
3260 /* Those cannot be references since size < sizeof (void*) */
3262 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3269 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3270 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3276 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3286 * Emit code to copy a valuetype of type @klass whose address is stored in
3287 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) layout/size.  Under gsharedvt
 * the size and memcpy helper are fetched from the rgctx at runtime. */
3290 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3292 MonoInst *iargs [4];
3293 int context_used, n;
3295 MonoMethod *memcpy_method;
3296 MonoInst *size_ins = NULL;
3297 MonoInst *memcpy_ins = NULL;
3301 * This check breaks with spilled vars... need to handle it during verification anyway.
3302 * g_assert (klass && klass == src->klass && klass == dest->klass);
3305 if (mini_is_gsharedvt_klass (cfg, klass)) {
3307 context_used = mini_class_check_context_used (cfg, klass);
3308 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3309 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3313 n = mono_class_native_size (klass, &align);
3315 n = mono_class_value_size (klass, &align);
3317 /* if native is true there should be no references in the struct */
3318 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3319 /* Avoid barriers when storing to the stack */
3320 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3321 (dest->opcode == OP_LDADDR))) {
3327 context_used = mini_class_check_context_used (cfg, klass);
3329 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3330 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3332 } else if (context_used) {
3333 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3335 if (cfg->compile_aot) {
3336 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3338 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3339 mono_class_compute_gc_descriptor (klass);
3344 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3346 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: small known-size copies are inlined, the rest call
 * the managed memcpy helper (via calli for gsharedvt). */
3351 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3352 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3353 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3358 iargs [2] = size_ins;
3360 EMIT_NEW_ICONST (cfg, iargs [2], n);
3362 memcpy_method = get_memcpy_method ();
3364 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3366 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and lazily cache) the managed String.memset(ptr,val,n) helper
 * from corlib; aborts if the running corlib does not provide it.
 */
3371 get_memset_method (void)
3373 static MonoMethod *memset_method = NULL;
3374 if (!memset_method) {
3375 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3377 g_error ("Old corlib found. Install a new one");
3379 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code zero-initializing a valuetype of class KLASS at the address
 * in DEST->dreg (CIL initobj).  Under gsharedvt the size and bzero
 * helper come from the rgctx; otherwise small sizes are inlined and
 * larger ones call the managed memset helper.
 */
3383 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3385 MonoInst *iargs [3];
3386 int n, context_used;
3388 MonoMethod *memset_method;
3389 MonoInst *size_ins = NULL;
3390 MonoInst *bzero_ins = NULL;
3391 static MonoMethod *bzero_method;
3393 /* FIXME: Optimize this for the case when dest is an LDADDR */
3395 mono_class_init (klass);
3396 if (mini_is_gsharedvt_klass (cfg, klass)) {
3397 context_used = mini_class_check_context_used (cfg, klass);
3398 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3399 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* The bzero helper only needs looking up once; it is cached in the
 * function-level static above. */
3401 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3402 g_assert (bzero_method);
3404 iargs [1] = size_ins;
3405 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3409 n = mono_class_value_size (klass, &align);
/* Small valuetypes are cleared inline with stores. */
3411 if (n <= sizeof (gpointer) * 8) {
3412 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3415 memset_method = get_memset_method ();
3417 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3418 EMIT_NEW_ICONST (cfg, iargs [2], n);
3419 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR loading the runtime generic context to use for METHOD inside a
 * shared method.  Depending on how the context is used, the rgctx comes
 * from the mrgctx variable, the vtable variable, or the vtable of the
 * 'this' argument.
 */
3424 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3426 MonoInst *this = NULL;
3428 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the rgctx via 'this'. */
3430 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3431 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3432 !method->klass->valuetype)
3433 EMIT_NEW_ARGLOAD (cfg, this, 0);
3435 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3436 MonoInst *mrgctx_loc, *mrgctx_var;
/* Generic methods carry a method rgctx in the vtable variable slot. */
3439 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3441 mrgctx_loc = mono_get_vtable_var (cfg);
3442 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3445 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3446 MonoInst *vtable_loc, *vtable_var;
3450 vtable_loc = mono_get_vtable_var (cfg);
3451 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3453 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3454 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx: dereference its class_vtable
 * field to get the vtable. */
3457 vtable_reg = alloc_preg (cfg);
3458 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3459 vtable_var->type = STACK_PTR;
/* Default: load the vtable from the 'this' object header. */
3467 vtable_reg = alloc_preg (cfg);
3468 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from MP) an rgctx-entry patch descriptor for METHOD: the
 * embedded MonoJumpInfo carries PATCH_TYPE/PATCH_DATA identifying what
 * to look up, INFO_TYPE says what to fetch, and IN_MRGCTX whether the
 * lookup goes through a method rgctx rather than a vtable.
 */
3473 static MonoJumpInfoRgctxEntry *
3474 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3476 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3477 res->method = method;
3478 res->in_mrgctx = in_mrgctx;
3479 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3480 res->data->type = patch_type;
3481 res->data->data.target = patch_data;
3482 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * using the runtime generic context in RGCTX.
 */
3487 static inline MonoInst*
3488 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3490 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR fetching the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
3494 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3495 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3497 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3498 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3500 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR fetching the RGCTX_TYPE property of the signature SIG from the
 * runtime generic context of the current method.
 */
3504 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3505 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3507 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3508 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3510 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR fetching the RGCTX_TYPE info for a gsharedvt call described by
 * (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3514 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3515 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3517 MonoJumpInfoGSharedVtCall *call_info;
3518 MonoJumpInfoRgctxEntry *entry;
3521 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3522 call_info->sig = sig;
3523 call_info->method = cmethod;
3525 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3526 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3528 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR fetching the gsharedvt per-method INFO structure for CMETHOD
 * from the runtime generic context of the current method.
 */
3533 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3534 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3536 MonoJumpInfoRgctxEntry *entry;
3539 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3540 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3542 return emit_rgctx_fetch (cfg, rgctx, entry);
3546 * emit_get_rgctx_method:
3548 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3549 * normal constants, else emit a load from the rgctx.
3552 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3553 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method (or its rgctx) is a compile-time constant. */
3555 if (!context_used) {
3558 switch (rgctx_type) {
3559 case MONO_RGCTX_INFO_METHOD:
3560 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3562 case MONO_RGCTX_INFO_METHOD_RGCTX:
3563 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3566 g_assert_not_reached ();
/* Shared case: fetch through the rgctx trampoline. */
3569 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3570 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3572 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR fetching the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
3577 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3578 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3580 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3581 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3583 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Return the index of the (DATA, RGCTX_TYPE) entry in the per-method
 * gsharedvt info table, registering a new entry if none exists yet.
 */
3587 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3589 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3590 MonoRuntimeGenericContextInfoTemplate *template;
/* Deduplicate: reuse an existing slot with the same type+data.
   LOCAL_OFFSET entries are deliberately never shared. */
3595 for (i = 0; i < info->num_entries; ++i) {
3596 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3598 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table geometrically (start at 16, then double) out of the cfg mempool. */
3602 if (info->num_entries == info->count_entries) {
3603 MonoRuntimeGenericContextInfoTemplate *new_entries;
3604 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3606 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3608 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3609 info->entries = new_entries;
3610 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index. */
3613 idx = info->num_entries;
3614 template = &info->entries [idx];
3615 template->info_type = rgctx_type;
3616 template->data = data;
3618 info->num_entries ++;
3624 * emit_get_gsharedvt_info:
3626 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3629 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or allocate) the slot for this entry, then load it from the
 * MonoGSharedVtMethodRuntimeInfo 'entries' array held in gsharedvt_info_var. */
3634 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3635 /* Load info->entries [idx] */
3636 dreg = alloc_preg (cfg);
3637 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class (via its byval MonoType). */
3643 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3645 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3649 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 * Emit a call to the generic-class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant.
 */
3652 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3654 MonoInst *vtable_arg;
3658 context_used = mini_class_check_context_used (cfg, klass);
3661 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3662 klass, MONO_RGCTX_INFO_VTABLE);
3664 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3668 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM cannot use the register-based calling convention of the trampoline,
 * so a different helper signature is used there. */
3671 if (COMPILE_LLVM (cfg))
3672 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3674 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3675 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the architecture's dedicated register when available. */
3676 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3677 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 * Emit a debugger sequence point at IP, but only when sequence points are
 * enabled and METHOD is the method actually being compiled (not an inlinee).
 */
3684 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3688 if (cfg->gen_seq_points && cfg->method == method) {
3689 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3691 ins->flags |= MONO_INST_NONEMPTY_STACK;
3692 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 * When --debug=casts is active, record the from/to classes of a pending cast
 * into the JIT TLS area so a failing cast can produce a detailed message.
 * NULL_CHECK guards the object against null first. OUT_BBLOCK, if non-NULL,
 * receives the current bblock on return (the emission may start a new one).
 */
3697 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3699 if (mini_get_debug_options ()->better_cast_details) {
3700 int vtable_reg = alloc_preg (cfg);
3701 int klass_reg = alloc_preg (cfg);
3702 MonoBasicBlock *is_null_bb = NULL;
3704 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3707 NEW_BBLOCK (cfg, is_null_bb);
3709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3713 tls_get = mono_get_jit_tls_intrinsic (cfg);
3715 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3719 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from := object->vtable->klass */
3720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to: resolved through the rgctx in shared code, else a constant. */
3725 context_used = mini_class_check_context_used (cfg, klass);
3727 MonoInst *class_ins;
3729 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3730 to_klass_reg = class_ins->dreg;
3732 to_klass_reg = alloc_preg (cfg);
3733 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3738 MONO_START_BB (cfg, is_null_bb);
3740 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 * Clear the cast-details TLS state recorded by save_cast_details once the
 * cast has succeeded.
 */
3746 reset_cast_details (MonoCompile *cfg)
3748 /* Reset the variables holding the cast details */
3749 if (mini_get_debug_options ()->better_cast_details) {
3750 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3752 MONO_ADD_INS (cfg->cbb, tls_get);
3753 /* It is enough to reset the from field */
3754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3759 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 * Emit IR verifying that OBJ's runtime type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for array stores).
 * The comparison strategy depends on AOT/shared-code mode.
 */
3762 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3764 int vtable_reg = alloc_preg (cfg);
3767 context_used = mini_class_check_context_used (cfg, array_class);
3769 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3771 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare the MonoClass, since vtables are per-domain. */
3773 if (cfg->opt & MONO_OPT_SHARED) {
3774 int class_reg = alloc_preg (cfg);
3775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3776 if (cfg->compile_aot) {
3777 int klass_reg = alloc_preg (cfg);
3778 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3779 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3781 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: fetch the expected vtable from the rgctx. */
3783 } else if (context_used) {
3784 MonoInst *vtable_ins;
3786 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3787 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3789 if (cfg->compile_aot) {
3793 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3795 vt_reg = alloc_preg (cfg);
3796 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3797 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3800 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3802 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3806 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3808 reset_cast_details (cfg);
3812 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3813 * generic code is generated.
3816 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Delegate to the managed Nullable<T>.Unbox helper method. */
3818 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3821 MonoInst *rgctx, *addr;
3823 /* FIXME: What if the class is shared? We might not
3824 have to get the address of the method from the
/* Shared code: look up the helper's code address through the rgctx and
 * perform an indirect call. */
3826 addr = emit_get_rgctx_method (cfg, context_used, method,
3827 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3829 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3831 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: call directly, passing the vtable if the helper needs one. */
3833 gboolean pass_vtable, pass_mrgctx;
3834 MonoInst *rgctx_arg = NULL;
3836 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3837 g_assert (!pass_mrgctx);
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3843 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3846 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 * Emit IR for the 'unbox' opcode: type-check the boxed object on the stack
 * (InvalidCastException on mismatch) and produce a managed pointer to its
 * payload, located sizeof(MonoObject) past the object header.
 */
3851 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3855 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3856 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3857 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3858 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3860 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3861 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3862 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3864 /* FIXME: generics */
3865 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type. */
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3869 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare element classes via an rgctx lookup. */
3875 MonoInst *element_class;
3877 /* This assertion is from the unboxcast insn */
3878 g_assert (klass->rank == 0);
3880 element_class = emit_get_rgctx_klass (cfg, context_used,
3881 klass->element_class, MONO_RGCTX_INFO_KLASS);
3883 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3884 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3886 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3887 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3888 reset_cast_details (cfg);
/* Result: address of the value payload just past the object header. */
3891 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3892 MONO_ADD_INS (cfg->cbb, add);
3893 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 * Unbox OBJ when KLASS is a gsharedvt (value-type-shared) type whose concrete
 * identity is only known at run time. Branches at run time on the class's
 * box-type descriptor: value type (1 == reference? see NOTE), reference type,
 * or Nullable<T>, and yields an address holding the unboxed value.
 * NOTE(review): the constants 1/2 compared against MONO_RGCTX_INFO_CLASS_BOX_TYPE
 * select the ref and nullable cases respectively — confirm against the
 * MONO_GSHAREDVT_BOX_TYPE_* definitions elsewhere in the tree.
 */
3900 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3902 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3903 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3907 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type check through the castclass_unbox icall. */
3913 args [1] = klass_inst;
3916 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3918 NEW_BBLOCK (cfg, is_ref_bb);
3919 NEW_BBLOCK (cfg, is_nullable_bb);
3920 NEW_BBLOCK (cfg, end_bb);
3921 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3926 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3928 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3929 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Value-type case: the payload sits right after the object header. */
3933 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3934 MONO_ADD_INS (cfg->cbb, addr);
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3939 MONO_START_BB (cfg, is_ref_bb);
3941 /* Save the ref to a temporary */
3942 dreg = alloc_ireg (cfg);
3943 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3944 addr->dreg = addr_reg;
3945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3946 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3949 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call the Nullable unbox helper through a hand-built
 * one-parameter signature, then take the address of the result temp. */
3952 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3953 MonoInst *unbox_call;
3954 MonoMethodSignature *unbox_sig;
3957 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3959 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3960 unbox_sig->ret = &klass->byval_arg;
3961 unbox_sig->param_count = 1;
3962 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3963 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3965 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3966 addr->dreg = addr_reg;
3969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3972 MONO_START_BB (cfg, end_bb);
3975 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3977 *out_cbb = cfg->cbb;
3983 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 * Emit IR allocating a new object of KLASS, choosing among the managed GC
 * allocator, domain-aware helpers (MONO_OPT_SHARED), a corlib-token helper
 * (AOT out-of-line paths), and the generic allocation function.
 */
3986 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3988 MonoInst *iargs [2];
/* --- shared-generic-code path --- */
3994 MonoInst *iargs [2];
3995 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3997 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED needs the MonoClass (mono_object_new takes domain+class),
 * otherwise the vtable is enough for mono_object_new_specific. */
3999 if (cfg->opt & MONO_OPT_SHARED)
4000 rgctx_info = MONO_RGCTX_INFO_KLASS;
4002 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4003 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4005 if (cfg->opt & MONO_OPT_SHARED) {
4006 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4008 alloc_ftn = mono_object_new;
4011 alloc_ftn = mono_object_new_specific;
4014 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4015 if (known_instance_size)
4016 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4017 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4020 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4023 if (cfg->opt & MONO_OPT_SHARED) {
4024 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4025 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4027 alloc_ftn = mono_object_new;
4028 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4029 /* This happens often in argument checking code, eg. throw new FooException... */
4030 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4031 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4032 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4034 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4035 MonoMethod *managed_alloc = NULL;
/* vtable creation failed => surface a TypeLoadException via the cfg. */
4039 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4040 cfg->exception_ptr = klass;
4044 #ifndef MONO_CROSS_COMPILE
4045 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4048 if (managed_alloc) {
4049 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4050 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4051 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4053 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the object size in words ("lw") as first argument. */
4055 guint32 lw = vtable->klass->instance_size;
4056 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4057 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4058 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4061 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4065 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4069 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 * Emit IR boxing VAL of type KLASS. Nullable<T> goes through the managed
 * Nullable<T>.Box helper; gsharedvt types branch at run time on the class's
 * box-type descriptor; everything else allocates and stores the value.
 * *OUT_CBB receives the bblock current after emission.
 */
4072 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4074 MonoInst *alloc, *ins;
4076 *out_cbb = cfg->cbb;
4078 if (mono_class_is_nullable (klass)) {
4079 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4082 /* FIXME: What if the class is shared? We might not
4083 have to get the method address from the RGCTX. */
4084 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4085 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4086 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4088 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable<T>: direct call, possibly passing the vtable. */
4090 gboolean pass_vtable, pass_mrgctx;
4091 MonoInst *rgctx_arg = NULL;
4093 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4094 g_assert (!pass_mrgctx);
4097 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4100 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4103 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: whether KLASS is a vtype, a ref type or Nullable<T> is only
 * known at run time, so emit a three-way branch (cf. handle_unbox_gsharedvt). */
4107 if (mini_is_gsharedvt_klass (cfg, klass)) {
4108 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4109 MonoInst *res, *is_ref, *src_var, *addr;
4112 dreg = alloc_ireg (cfg);
4114 NEW_BBLOCK (cfg, is_ref_bb);
4115 NEW_BBLOCK (cfg, is_nullable_bb);
4116 NEW_BBLOCK (cfg, end_bb);
4117 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4119 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Value-type case: allocate, then copy the value past the header. */
4125 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4128 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4129 ins->opcode = OP_STOREV_MEMBASE;
4131 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4132 res->type = STACK_OBJ;
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: boxing a ref is a no-op; just reload the value. */
4137 MONO_START_BB (cfg, is_ref_bb);
4138 addr_reg = alloc_ireg (cfg);
4140 /* val is a vtype, so has to load the value manually */
4141 src_var = get_vreg_to_inst (cfg, val->dreg);
4143 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4144 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4149 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time. */
4152 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4153 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4155 MonoMethodSignature *box_sig;
4158 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4159 * construct that method at JIT time, so have to do things by hand.
4161 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4162 box_sig->ret = &mono_defaults.object_class->byval_arg;
4163 box_sig->param_count = 1;
4164 box_sig->params [0] = &klass->byval_arg;
4165 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4166 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4167 res->type = STACK_OBJ;
4171 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4173 MONO_START_BB (cfg, end_bb);
4175 *out_cbb = cfg->cbb;
/* Plain (non-nullable, non-gsharedvt) box: allocate and store. */
4179 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4183 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 * TRUE if KLASS is a generic instance (or, in shared code, an open generic)
 * with at least one covariant/contravariant type parameter instantiated with
 * a reference type — such casts need the variance-aware cache helpers.
 */
4190 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4193 MonoGenericContainer *container;
4194 MonoGenericInst *ginst;
4196 if (klass->generic_class) {
4197 container = klass->generic_class->container_class->generic_container;
4198 ginst = klass->generic_class->context.class_inst;
4199 } else if (klass->generic_container && context_used) {
4200 container = klass->generic_container;
4201 ginst = container->context.class_inst;
4206 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters; only (co/contra)variant ones matter here. */
4208 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4210 type = ginst->type_argv [i];
4211 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 * TRUE when an isinst/castclass against KLASS cannot be compiled as a simple
 * class-hierarchy walk: interfaces, arrays, Nullable<T>, MarshalByRef types,
 * sealed classes and generic type variables all need the slower checks.
 * Fix: parenthesize every use of the macro argument so invoking the macro
 * with a non-trivial expression cannot mis-bind due to operator precedence.
 */
#define is_complex_isinst(klass) (((klass)->flags & TYPE_ATTRIBUTE_INTERFACE) || (klass)->rank || mono_class_is_nullable ((klass)) || mono_class_is_marshalbyref ((klass)) || ((klass)->flags & TYPE_ATTRIBUTE_SEALED) || (klass)->byval_arg.type == MONO_TYPE_VAR || (klass)->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 * Emit a call to the castclass-with-cache marshal wrapper, bracketed by the
 * cast-details bookkeeping. ARGS are (obj, klass, cache). *OUT_BBLOCK is
 * updated to the bblock current after emission.
 */
4220 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4222 MonoMethod *mono_castclass;
4225 mono_castclass = mono_marshal_get_castclass_with_cache ();
4227 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4228 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4229 reset_cast_details (cfg);
4230 *out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 * Non-shared variant: build the (obj, klass, cache) argument triple with a
 * constant klass and a cache slot that is either an AOT patch (unique per
 * call site) or a fresh domain-allocated pointer, then delegate.
 */
4236 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4245 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4248 if (cfg->compile_aot) {
4249 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4250 cfg->castclass_cache_index ++;
4251 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4252 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4254 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4257 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4259 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4263 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 * Emit IR for the 'castclass' opcode against KLASS, throwing
 * InvalidCastException on failure. Strategy in decreasing preference:
 * cache-based wrapper (variant generics / complex types), inlined marshal
 * castclass wrapper (MBR/interfaces, non-shared), or inline vtable checks.
 */
4266 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4268 MonoBasicBlock *is_null_bb;
4269 int obj_reg = src->dreg;
4270 int vtable_reg = alloc_preg (cfg);
4272 MonoInst *klass_inst = NULL, *res;
4273 MonoBasicBlock *bblock;
4277 context_used = mini_class_check_context_used (cfg, klass);
4279 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4280 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4281 (*inline_costs) += 2;
4284 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4285 MonoMethod *mono_castclass;
4286 MonoInst *iargs [1];
4289 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the wrapper body directly into the caller. */
4292 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4293 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4294 iargs, ip, cfg->real_offset, TRUE, &bblock);
4295 reset_cast_details (cfg);
4296 CHECK_CFG_EXCEPTION;
4297 g_assert (costs > 0);
4299 cfg->real_offset += 5;
4301 (*inline_costs) += costs;
/* Shared/complex types: use the castclass cache keyed through the rgctx. */
4310 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4311 MonoInst *cache_ins;
4313 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4318 /* klass - it's the second element of the cache entry*/
4319 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4322 args [2] = cache_ins;
4324 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4327 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: null passes the cast trivially. */
4330 NEW_BBLOCK (cfg, is_null_bb);
4332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4335 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4337 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4339 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4341 int klass_reg = alloc_preg (cfg);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact vtable/klass comparison is sufficient. */
4345 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4346 /* the remoting code is broken, access the class for now */
4347 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4348 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4350 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4351 cfg->exception_ptr = klass;
4354 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4359 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4362 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4366 MONO_START_BB (cfg, is_null_bb);
4368 reset_cast_details (cfg);
4379 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit IR for the 'isinst' opcode: result is the object itself when it is an
 * instance of KLASS, NULL otherwise. Complex/variant types go through the
 * isinst-with-cache wrapper; everything else is checked inline with branches
 * to false_bb (not an instance) / is_null_bb (is an instance or input null).
 */
4382 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4385 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4386 int obj_reg = src->dreg;
4387 int vtable_reg = alloc_preg (cfg);
4388 int res_reg = alloc_ireg_ref (cfg);
4389 MonoInst *klass_inst = NULL;
4394 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4395 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4396 MonoInst *cache_ins;
4398 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4403 /* klass - it's the second element of the cache entry*/
4404 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4407 args [2] = cache_ins;
4409 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4412 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4415 NEW_BBLOCK (cfg, is_null_bb);
4416 NEW_BBLOCK (cfg, false_bb);
4417 NEW_BBLOCK (cfg, end_bb);
4419 /* Do the assignment at the beginning, so the other assignment can be if converted */
4420 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4421 ins->type = STACK_OBJ;
4424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4429 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4430 g_assert (!context_used);
4431 /* the is_null_bb target simply copies the input register to the output */
4432 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4434 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then compare element (cast) classes,
 * with special handling for enum / object element types. */
4437 int rank_reg = alloc_preg (cfg);
4438 int eclass_reg = alloc_preg (cfg);
4440 g_assert (!context_used);
4441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4446 if (klass->cast_class == mono_defaults.object_class) {
4447 int parent_reg = alloc_preg (cfg);
4448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4449 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4450 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4451 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4452 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4453 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4454 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4456 } else if (klass->cast_class == mono_defaults.enum_class) {
4457 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4459 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4460 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4462 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4463 /* Check that the object is a vector too */
4464 int bounds_reg = alloc_preg (cfg);
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4470 /* the is_null_bb target simply copies the input register to the output */
4471 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4473 } else if (mono_class_is_nullable (klass)) {
4474 g_assert (!context_used);
4475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4476 /* the is_null_bb target simply copies the input register to the output */
4477 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4479 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4480 g_assert (!context_used);
4481 /* the remoting code is broken, access the class for now */
4482 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4483 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4485 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4486 cfg->exception_ptr = klass;
4489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4492 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4498 /* the is_null_bb target simply copies the input register to the output */
4499 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Not an instance: produce NULL. */
4504 MONO_START_BB (cfg, false_bb);
4506 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4509 MONO_START_BB (cfg, is_null_bb);
4511 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the internal CISINST opcode; unlike isinst, it yields an
 * integer classification (see the comment below) so callers can distinguish
 * "not an instance" from "proxy of undeterminable type" (remoting).
 */
4517 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4519 /* This opcode takes as input an object reference and a class, and returns:
4520 0) if the object is an instance of the class,
4521 1) if the object is not instance of the class,
4522 2) if the object is a proxy whose type cannot be determined */
4525 #ifndef DISABLE_REMOTING
4526 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4528 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4530 int obj_reg = src->dreg;
4531 int dreg = alloc_ireg (cfg);
4533 #ifndef DISABLE_REMOTING
4534 int klass_reg = alloc_preg (cfg);
4537 NEW_BBLOCK (cfg, true_bb);
4538 NEW_BBLOCK (cfg, false_bb);
4539 NEW_BBLOCK (cfg, end_bb);
4540 #ifndef DISABLE_REMOTING
4541 NEW_BBLOCK (cfg, false2_bb);
4542 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null input is "not an instance" (result 1). */
4545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4548 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4549 #ifndef DISABLE_REMOTING
4550 NEW_BBLOCK (cfg, interface_fail_bb);
4553 tmp_reg = alloc_preg (cfg);
4554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4555 #ifndef DISABLE_REMOTING
/* Interface check failed: it may still be a transparent proxy whose
 * custom type info cannot rule the interface in or out (result 2). */
4556 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4557 MONO_START_BB (cfg, interface_fail_bb);
4558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4560 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4562 tmp_reg = alloc_preg (cfg);
4563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4567 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class: handle the transparent-proxy case first. */
4570 #ifndef DISABLE_REMOTING
4571 tmp_reg = alloc_preg (cfg);
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4575 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4576 tmp_reg = alloc_preg (cfg);
4577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4580 tmp_reg = alloc_preg (cfg);
4581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4585 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4588 MONO_START_BB (cfg, no_proxy_bb);
4590 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4592 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the three possible integer results. */
4596 MONO_START_BB (cfg, false_bb);
4598 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4601 #ifndef DISABLE_REMOTING
4602 MONO_START_BB (cfg, false2_bb);
4604 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4608 MONO_START_BB (cfg, true_bb);
4610 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4612 MONO_START_BB (cfg, end_bb);
4615 MONO_INST_NEW (cfg, ins, OP_ICONST);
4617 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for the internal "ccastclass" opcode: a castclass that is
 *   remoting-aware.  Result encoding is described in the comment below
 *   (0/1 in DREG, or an InvalidCastException at runtime).
 * NOTE(review): interleaved lines (braces, #else/#endif) are elided in
 * this extract — confirm the preprocessor structure against the full file.
 */
4623 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4625 /* This opcode takes as input an object reference and a class, and returns:
4626 0) if the object is an instance of the class,
4627 1) if the object is a proxy whose type cannot be determined
4628 an InvalidCastException exception is thrown otherwise */
4631 #ifndef DISABLE_REMOTING
4632 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4634 MonoBasicBlock *ok_result_bb;
4636 int obj_reg = src->dreg;
4637 int dreg = alloc_ireg (cfg);
4638 int tmp_reg = alloc_preg (cfg);
4640 #ifndef DISABLE_REMOTING
4641 int klass_reg = alloc_preg (cfg);
4642 NEW_BBLOCK (cfg, end_bb);
4645 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully -> result 0. */
4647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a runtime failure can produce a useful message. */
4650 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4652 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4653 #ifndef DISABLE_REMOTING
4654 NEW_BBLOCK (cfg, interface_fail_bb);
4656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4657 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4658 MONO_START_BB (cfg, interface_fail_bb);
4659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy receiver that failed the interface check: throws InvalidCastException. */
4661 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4663 tmp_reg = alloc_preg (cfg);
4664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4666 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: cannot determine the type -> result 1. */
4668 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4669 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4672 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4676 #ifndef DISABLE_REMOTING
4677 NEW_BBLOCK (cfg, no_proxy_bb);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4681 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4683 tmp_reg = alloc_preg (cfg);
/* For a proxy, check against the proxied class (remote_class->proxy_class). */
4684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4687 tmp_reg = alloc_preg (cfg);
4688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4692 NEW_BBLOCK (cfg, fail_1_bb);
4694 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: undeterminable proxy -> result 1. */
4696 MONO_START_BB (cfg, fail_1_bb);
4698 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* no_proxy_bb: plain object -> ordinary castclass (throws on mismatch). */
4701 MONO_START_BB (cfg, no_proxy_bb);
4703 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4705 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> result 0. */
4709 MONO_START_BB (cfg, ok_result_bb);
4711 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4713 #ifndef DISABLE_REMOTING
4714 MONO_START_BB (cfg, end_bb);
/* Materialize the i4 result for the evaluation stack. */
4718 MONO_INST_NEW (cfg, ins, OP_ICONST);
4720 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *   Intrinsic expansion of Enum.HasFlag: loads the enum value, computes
 *   (value & flag) and compares it against flag, producing a boolean i4
 *   via a CEQ.  Uses 32-bit or 64-bit register ops depending on the
 *   enum's underlying type (is_i4 — the switch that clears it is partly
 *   elided in this extract).
 */
4725 static G_GNUC_UNUSED MonoInst*
4726 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4728 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4729 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4730 gboolean is_i4 = TRUE;
4732 switch (enum_type->type) {
4735 #if SIZEOF_REGISTER == 8
4744 MonoInst *load, *and, *cmp, *ceq;
4745 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4746 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4747 int dest_reg = alloc_ireg (cfg);
/* value & flag == flag  <=>  all requested flag bits are set. */
4749 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4750 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4751 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4752 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4754 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only low-level opcodes. */
4757 load = mono_decompose_opcode (cfg, load, NULL);
4758 and = mono_decompose_opcode (cfg, and, NULL);
4759 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4760 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *   Inline the work of mono_delegate_ctor: allocate the delegate object,
 *   store its target/method fields (with write barriers where needed),
 *   optionally cache a per-domain code slot for the compiled target, and
 *   install the invoke trampoline.  CONTEXT_USED selects RGCTX-based
 *   lookups for generic sharing; VIRTUAL selects the virtual-invoke
 *   trampoline path.
 */
4768 * Returns NULL and set the cfg exception on error.
4770 static G_GNUC_UNUSED MonoInst*
4771 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4775 gpointer trampoline;
4776 MonoInst *obj, *method_ins, *tramp_ins;
4780 // FIXME reenable optimisation for virtual case
4785 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4788 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4792 obj = handle_alloc (cfg, klass, FALSE, 0);
4796 /* Inline the contents of mono_delegate_ctor */
4798 /* Set target field */
4799 /* Optimize away setting of NULL target */
4800 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference: notify the GC if barriers are on. */
4802 if (cfg->gen_write_barriers) {
4803 dreg = alloc_preg (cfg);
4804 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4805 emit_write_barrier (cfg, ptr, target);
4809 /* Set method field */
4810 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4814 * To avoid looking up the compiled code belonging to the target method
4815 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4816 * store it, and we fill it after the method has been compiled.
4818 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4819 MonoInst *code_slot_ins;
4822 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
4824 domain = mono_domain_get ();
4825 mono_domain_lock (domain);
4826 if (!domain_jit_info (domain)->method_code_hash)
4827 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4828 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4830 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4831 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4833 mono_domain_unlock (domain);
4835 if (cfg->compile_aot)
4836 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4838 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable constant; JIT: use a concrete trampoline pointer. */
4843 if (cfg->compile_aot) {
4844 MonoDelegateClassMethodPair *del_tramp;
4846 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4847 del_tramp->klass = klass;
4848 del_tramp->method = context_used ? NULL : method;
4849 del_tramp->virtual = virtual;
4850 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4853 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4855 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4856 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4859 /* Set invoke_impl field */
4861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4863 dreg = alloc_preg (cfg);
4864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4867 dreg = alloc_preg (cfg);
4868 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4872 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a call to the rank-specific mono_array_new_va icall for the
 *   CIL "newarr"/multi-dim array allocation path.  The icall uses a
 *   vararg calling convention, which is why varargs are flagged on the
 *   cfg and LLVM compilation is disabled.
 */
4878 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4880 MonoJitICallInfo *info;
4882 /* Need to register the icall so it gets an icall wrapper */
4883 info = mono_get_array_new_va_icall (rank);
4885 cfg->flags |= MONO_CFG_HAS_VARARGS;
4887 /* mono_array_new_va () needs a vararg calling convention */
4888 cfg->disable_llvm = TRUE;
4890 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4891 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4895 * handle_constrained_gsharedvt_call:
4897 * Handle constrained calls where the receiver is a gsharedvt type.
4898 * Return the instruction representing the call. Set the cfg exception on failure.
4901 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_call,
4902 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4904 MonoInst *ins = NULL;
4905 MonoBasicBlock *bblock = *ref_bblock;
4906 gboolean emit_widen = *ref_emit_widen;
4909 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4910 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4911 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported; everything else falls
 * through to GSHAREDVT_FAILURE below. */
4913 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4914 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4915 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4916 MonoInst *args [16];
4919 * This case handles calls to
4920 * - object:ToString()/Equals()/GetHashCode(),
4921 * - System.IComparable<T>:CompareTo()
4922 * - System.IEquatable<T>:Equals ()
4923 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = target method (via RGCTX when it depends on generic context). */
4927 if (mono_method_check_context_used (cmethod))
4928 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4930 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4931 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
4933 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4934 if (fsig->hasthis && fsig->param_count) {
4935 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4936 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4937 ins->dreg = alloc_preg (cfg);
4938 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4939 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: pass its address plus box-type info so the icall
 * can decide whether boxing is needed at runtime. */
4942 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4945 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4947 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4948 addr_reg = ins->dreg;
4949 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4951 EMIT_NEW_ICONST (cfg, args [3], 0);
4952 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4955 EMIT_NEW_ICONST (cfg, args [3], 0);
4956 EMIT_NEW_ICONST (cfg, args [4], 0);
4958 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox value-typed returns. */
4961 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4962 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4963 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Unbox inline: skip the MonoObject header, then load the value. */
4967 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4968 MONO_ADD_INS (cfg->cbb, add);
4970 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4971 MONO_ADD_INS (cfg->cbb, ins);
4972 /* ins represents the call result */
4975 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4978 *ref_emit_widen = emit_widen;
4979 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *   Emit OP_LOAD_GOTADDR into the got_var at the very start of the entry
 *   basic block (PIC support), and a dummy use in the exit block so the
 *   variable stays live for the whole method.  No-op if there is no
 *   got_var or it was already allocated.
 */
4988 mono_emit_load_got_addr (MonoCompile *cfg)
4990 MonoInst *getaddr, *dummy_use;
4992 if (!cfg->got_var || cfg->got_var_allocated)
4995 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4996 getaddr->cil_code = cfg->header->code;
4997 getaddr->dreg = cfg->got_var->dreg;
4999 /* Add it to the start of the first bblock */
/* Prepend rather than append so the GOT address is available before any other code. */
5000 if (cfg->bb_entry->code) {
5001 getaddr->next = cfg->bb_entry->code;
5002 cfg->bb_entry->code = getaddr;
5005 MONO_ADD_INS (cfg->bb_entry, getaddr);
5007 cfg->got_var_allocated = TRUE;
5010 * Add a dummy use to keep the got_var alive, since real uses might
5011 * only be generated by the back ends.
5012 * Add it to end_bblock, so the variable's lifetime covers the whole
5014 * It would be better to make the usage of the got var explicit in all
5015 * cases when the backend needs it (i.e. calls, throw etc.), so this
5016 * wouldn't be needed.
5018 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5019 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit in IL bytes; initialized lazily from the
 * MONO_INLINELIMIT environment variable (default INLINE_LENGTH_LIMIT)
 * in mono_method_check_inlining (). */
5022 static int inline_limit;
5023 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Rejects on: inlining disabled, generic sharing, inline depth > 10,
 *   noinline/synchronized/marshalbyref methods, IL size over the (env
 *   configurable) limit, classes whose cctor cannot be run or proven
 *   unnecessary, declarative security, and (on soft-float targets) R4
 *   parameters/returns.
 * NOTE(review): return statements and some braces are elided in this
 * extract; each guard below implies an early "return FALSE" (or TRUE at
 * the end) in the full file.
 */
5026 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5028 MonoMethodHeaderSummary header;
5030 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5031 MonoMethodSignature *sig = mono_method_signature (method);
5035 if (cfg->disable_inline)
5037 if (cfg->generic_sharing_context)
/* Cap recursion through nested inlining. */
5040 if (cfg->inline_depth > 10)
5043 #ifdef MONO_ARCH_HAVE_LMF_OPS
5044 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5045 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5046 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5051 if (!mono_method_get_header_summary (method, &header))
5054 /*runtime, icall and pinvoke are checked by summary call*/
5055 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5056 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5057 (mono_class_is_marshalbyref (method->klass)) ||
5061 /* also consider num_locals? */
5062 /* Do the size check early to avoid creating vtables */
5063 if (!inline_limit_inited) {
5064 if (g_getenv ("MONO_INLINELIMIT"))
5065 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5067 inline_limit = INLINE_LENGTH_LIMIT;
5068 inline_limit_inited = TRUE;
/* [MethodImpl(AggressiveInlining)] overrides the size limit. */
5070 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5074 * if we can initialize the class of the method right away, we do,
5075 * otherwise we don't allow inlining if the class needs initialization,
5076 * since it would mean inserting a call to mono_runtime_class_init()
5077 * inside the inlined code
5079 if (!(cfg->opt & MONO_OPT_SHARED)) {
5080 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5081 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5082 vtable = mono_class_vtable (cfg->domain, method->klass);
5085 if (!cfg->compile_aot)
5086 mono_runtime_class_init (vtable);
5087 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5088 if (cfg->run_cctors && method->klass->has_cctor) {
5089 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5090 if (!method->klass->runtime_info)
5091 /* No vtable created yet */
5093 vtable = mono_class_vtable (cfg->domain, method->klass);
5096 /* This makes so that inline cannot trigger */
5097 /* .cctors: too many apps depend on them */
5098 /* running with a specific order... */
5099 if (! vtable->initialized)
5101 mono_runtime_class_init (vtable);
5103 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5104 if (!method->klass->runtime_info)
5105 /* No vtable created yet */
5107 vtable = mono_class_vtable (cfg->domain, method->klass);
5110 if (!vtable->initialized)
5115 * If we're compiling for shared code
5116 * the cctor will need to be run at aot method load time, for example,
5117 * or at the end of the compilation of the inlining method.
5119 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5124 * CAS - do not inline methods with declarative security
5125 * Note: this has to be before any possible return TRUE;
5127 if (mono_security_method_has_declsec (method))
5130 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 values need fallback handling that inlining would bypass. */
5131 if (mono_arch_is_soft_float ()) {
5133 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5135 for (i = 0; i < sig->param_count; ++i)
5136 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5141 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static-field access on KLASS requires a class-init
 *   call at this point.  Already-initialized vtables (non-AOT), classes
 *   without a needed cctor, and accesses from within the class's own
 *   non-static methods can skip it.
 */
5148 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5150 if (!cfg->compile_aot) {
5152 if (vtable->initialized)
5156 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5157 if (cfg->method == method)
5161 if (!mono_class_needs_cctor_run (klass, method))
5164 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5165 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit the address of element INDEX of one-dimensional array ARR with
 *   element class KLASS: &arr->vector [index * element_size].  BCHECK
 *   controls whether a bounds check against max_length is emitted.
 *   gsharedvt variable-size elements fetch the element size from the
 *   RGCTX at runtime; x86/amd64 use an LEA for power-of-two sizes.
 */
5172 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5176 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5179 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5182 mono_class_init (klass);
5183 size = mono_class_array_element_size (klass);
5186 mult_reg = alloc_preg (cfg);
5187 array_reg = arr->dreg;
5188 index_reg = index->dreg;
5190 #if SIZEOF_REGISTER == 8
5191 /* The array reg is 64 bits but the index reg is only 32 */
5192 if (COMPILE_LLVM (cfg)) {
5194 index2_reg = index_reg;
5196 index2_reg = alloc_preg (cfg);
5197 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5200 if (index->type == STACK_I8) {
5201 index2_reg = alloc_preg (cfg);
5202 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5204 index2_reg = index_reg;
5209 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5211 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes fold the multiply into a scaled LEA. */
5212 if (size == 1 || size == 2 || size == 4 || size == 8) {
5213 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5215 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5216 ins->klass = mono_class_get_element_class (klass);
5217 ins->type = STACK_MP;
5223 add_reg = alloc_ireg_mp (cfg);
5226 MonoInst *rgctx_ins;
/* gsharedvt: element size is unknown at compile time; load it from the RGCTX. */
5229 g_assert (cfg->generic_sharing_context);
5230 context_used = mini_class_check_context_used (cfg, klass);
5231 g_assert (context_used);
5232 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5233 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5235 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5237 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5238 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5239 ins->klass = mono_class_get_element_class (klass);
5240 ins->type = STACK_MP;
5241 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *   Emit the address of element [i,j] of a rank-2 array, including
 *   per-dimension lower-bound adjustment and bounds checks against the
 *   MonoArrayBounds records.  Only compiled on targets with native
 *   mul/div (depends on OP_LMUL).
 */
5246 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5248 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5250 int bounds_reg = alloc_preg (cfg);
5251 int add_reg = alloc_ireg_mp (cfg);
5252 int mult_reg = alloc_preg (cfg);
5253 int mult2_reg = alloc_preg (cfg);
5254 int low1_reg = alloc_preg (cfg);
5255 int low2_reg = alloc_preg (cfg);
5256 int high1_reg = alloc_preg (cfg);
5257 int high2_reg = alloc_preg (cfg);
5258 int realidx1_reg = alloc_preg (cfg);
5259 int realidx2_reg = alloc_preg (cfg);
5260 int sum_reg = alloc_preg (cfg);
5261 int index1, index2, tmpreg;
5265 mono_class_init (klass);
5266 size = mono_class_array_element_size (klass);
5268 index1 = index_ins1->dreg;
5269 index2 = index_ins2->dreg;
5271 #if SIZEOF_REGISTER == 8
5272 /* The array reg is 64 bits but the index reg is only 32 */
5273 if (COMPILE_LLVM (cfg)) {
5276 tmpreg = alloc_preg (cfg);
5277 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5279 tmpreg = alloc_preg (cfg);
5280 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5284 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5288 /* range checking */
5289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5290 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare with length. */
5292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5293 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5294 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5296 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5297 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5298 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds record follows the first in memory. */
5300 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5301 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5302 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5303 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5304 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5305 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5306 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = &vector [(realidx1 * dim2_length + realidx2) * element_size]. */
5308 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5309 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5310 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5311 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5312 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5314 ins->type = STACK_MP;
5316 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatcher for array element-address computation: rank 1 uses the
 *   inline 1-D path, rank 2 the inline 2-D path (when OP_LMUL is native
 *   and intrinsics are on), everything else calls the marshalled
 *   Address() helper for the given rank/element size.
 */
5323 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5327 MonoMethod *addr_method;
5329 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the trailing value argument is not an index. */
5331 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5334 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5336 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5337 /* emit_ldelema_2 depends on OP_LMUL */
5338 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5339 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5343 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
5346 element_size = mono_class_array_element_size (eclass);
5347 addr_method = mono_marshal_get_array_address (rank, element_size);
5348 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5353 static MonoBreakPolicy
5354 always_insert_breakpoint (MonoMethod *method)
5356 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5359 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5362 * mono_set_break_policy:
5363 * policy_callback: the new callback function
5365 * Allow embedders to decide whether to actually obey breakpoint instructions
5366 * (both break IL instructions and Debugger.Break () method calls), for example
5367 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5368 * untrusted or semi-trusted code.
5370 * @policy_callback will be called every time a break point instruction needs to
5371 * be inserted with the method argument being the method that calls Debugger.Break()
5372 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5373 * if it wants the breakpoint to not be effective in the given method.
5374 * #MONO_BREAK_POLICY_ALWAYS is the default.
5377 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5379 if (policy_callback)
5380 break_policy_func = policy_callback;
/* NULL restores the default always-break policy. */
5382 break_policy_func = always_insert_breakpoint;
/* Query the installed break policy for METHOD.  (Name has a historical
 * typo — "brekpoint" — kept because callers elsewhere use it.) */
5386 should_insert_brekpoint (MonoMethod *method) {
5387 switch (break_policy_func (method)) {
5388 case MONO_BREAK_POLICY_ALWAYS:
5390 case MONO_BREAK_POLICY_NEVER:
5392 case MONO_BREAK_POLICY_ON_DBG:
5393 g_warning ("mdb no longer supported");
5396 g_warning ("Incorrect value returned from break policy callback");
5401 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5403 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5405 MonoInst *addr, *store, *load;
5406 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5408 /* the bounds check is already done by the callers */
5409 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: copy *args[2] into the element (with write barrier for refs);
 * otherwise copy the element out into *args[2]. */
5411 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5412 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5413 if (mini_type_is_reference (cfg, fsig->params [2]))
5414 emit_write_barrier (cfg, addr, load);
5416 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5417 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS instantiates to a reference type (delegates to mini_type_is_reference). */
5424 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5426 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit a stelem: sp[0] = array, sp[1] = index, sp[2] = value.  Storing
 *   a non-null reference with SAFETY_CHECKS goes through the virtual
 *   stelemref helper (covariance check); value types and null stores use
 *   direct memory stores, with a constant-folded offset when the index
 *   is an OP_ICONST.
 */
5430 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5432 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5433 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5434 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5435 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5436 MonoInst *iargs [3];
5439 mono_class_setup_vtable (obj_array);
5440 g_assert (helper->slot);
5442 if (sp [0]->type != STACK_OBJ)
5444 if (sp [2]->type != STACK_OBJ)
5451 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5455 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5458 // FIXME-VT: OP_ICONST optimization
5459 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5460 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5461 ins->opcode = OP_STOREV_MEMBASE;
5462 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time. */
5463 int array_reg = sp [0]->dreg;
5464 int index_reg = sp [1]->dreg;
5465 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5468 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5469 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5471 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5472 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5473 if (generic_class_is_reference_type (cfg, klass))
5474 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Intrinsic for the Array UnsafeLoad/UnsafeStore helpers: element
 *   access with no bounds/covariance checks (callers guarantee safety).
 */
5481 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5486 eklass = mono_class_from_mono_type (fsig->params [2]);
5488 eklass = mono_class_from_mono_type (fsig->ret);
5491 return emit_array_store (cfg, eklass, args, FALSE);
5493 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5494 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   TRUE when a value of PARAM_KLASS can be reinterpreted bit-for-bit as
 *   RETURN_KLASS: both value types, no GC references, not mixing structs
 *   with primitives, no floating point, and identical value size.
 */
5500 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5504 //Only allow for valuetypes
5505 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reference-bearing layouts cannot be bit-copied without GC knowledge. */
5509 if (param_klass->has_references || return_klass->has_references)
5512 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5513 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5514 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floats live in different registers; reinterpreting them is not a plain move. */
5517 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5518 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5521 //And have the same size
5522 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Intrinsic for the Array UnsafeMov helper: reinterpret the argument
 *   as the return type when the two are bit-compatible (directly, or as
 *   rank-1 arrays of bit-compatible elements).
 */
5528 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5530 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5531 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5533 //Valuetypes that are semantically equivalent
5534 if (is_unsafe_mov_compatible (param_klass, return_klass))
5537 //Arrays of valuetypes that are semantically equivalent
5538 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with an intrinsic: SIMD ctors
 *   first (when MONO_OPT_SIMD is enabled), then native-types intrinsics.
 */
5545 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5547 #ifdef MONO_ARCH_SIMD_INTRINSICS
5548 MonoInst *ins = NULL;
5550 if (cfg->opt & MONO_OPT_SIMD) {
5551 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5557 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given MONO_MEMORY_BARRIER_* kind into
 * the current basic block. */
5561 emit_memory_barrier (MonoCompile *cfg, int kind)
5563 MonoInst *ins = NULL;
5564 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5565 MONO_ADD_INS (cfg->cbb, ins);
5566 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics that only the LLVM backend can lower: System.Math
 *   Sin/Cos/Sqrt/Abs(double) as single-operand float ops, and
 *   Min/Max (I4/U4/I8/U8) as CMOV-style ops when MONO_OPT_CMOV is on.
 *   NOTE(review): lines are elided; the opcode assignments for
 *   Sin/Cos/Sqrt/Abs and the signed Min/Max cases are not visible here.
 */
5572 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5574 MonoInst *ins = NULL;
5577 /* The LLVM backend supports these intrinsics */
5578 if (cmethod->klass == mono_defaults.math_class) {
5579 if (strcmp (cmethod->name, "Sin") == 0) {
5581 } else if (strcmp (cmethod->name, "Cos") == 0) {
5583 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5585 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: result in a fresh float reg. */
5589 if (opcode && fsig->param_count == 1) {
5590 MONO_INST_NEW (cfg, ins, opcode);
5591 ins->type = STACK_R8;
5592 ins->dreg = mono_alloc_freg (cfg);
5593 ins->sreg1 = args [0]->dreg;
5594 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max, only when conditional moves are enabled. */
5598 if (cfg->opt & MONO_OPT_CMOV) {
5599 if (strcmp (cmethod->name, "Min") == 0) {
5600 if (fsig->params [0]->type == MONO_TYPE_I4)
5602 if (fsig->params [0]->type == MONO_TYPE_U4)
5603 opcode = OP_IMIN_UN;
5604 else if (fsig->params [0]->type == MONO_TYPE_I8)
5606 else if (fsig->params [0]->type == MONO_TYPE_U8)
5607 opcode = OP_LMIN_UN;
5608 } else if (strcmp (cmethod->name, "Max") == 0) {
5609 if (fsig->params [0]->type == MONO_TYPE_I4)
5611 if (fsig->params [0]->type == MONO_TYPE_U4)
5612 opcode = OP_IMAX_UN;
5613 else if (fsig->params [0]->type == MONO_TYPE_I8)
5615 else if (fsig->params [0]->type == MONO_TYPE_U8)
5616 opcode = OP_LMAX_UN;
/* Binary Min/Max: stack type follows the 32/64-bit parameter type. */
5620 if (opcode && fsig->param_count == 2) {
5621 MONO_INST_NEW (cfg, ins, opcode);
5622 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5623 ins->dreg = mono_alloc_ireg (cfg);
5624 ins->sreg1 = args [0]->dreg;
5625 ins->sreg2 = args [1]->dreg;
5626 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that remain valid under generic sharing: the
 *   Array.UnsafeStore / UnsafeLoad / UnsafeMov helpers.
 *   NOTE(review): the fall-through return is elided from this excerpt.
 */
5634 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5636 if (cmethod->klass == mono_defaults.array_class) {
5637 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5638 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5639 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5640 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5641 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5642 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *   Try to replace a call to CMETHOD with an inline IR sequence (an
 *   "intrinsic").  Dispatches on cmethod->klass: String, Object, Array,
 *   RuntimeHelpers, Thread, Monitor, Interlocked, Volatile, Debugger,
 *   Environment, Math and the Xamarin ObjC Selector class; finally falls
 *   back to SIMD / native-types / LLVM / arch-specific expanders.
 *   NOTE(review): this excerpt is elided — many lines of the original
 *   function (returns, #else/#endif, case labels) are not visible.
 */
5649 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5651 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5653 static MonoClass *runtime_helpers_class = NULL;
5654 if (! runtime_helpers_class)
5655 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5656 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5658 if (cmethod->klass == mono_defaults.string_class) {
/* this[int]: bounds-check then load a 16-bit char at chars + index*2. */
5659 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5660 int dreg = alloc_ireg (cfg);
5661 int index_reg = alloc_preg (cfg);
5662 int mult_reg = alloc_preg (cfg);
5663 int add_reg = alloc_preg (cfg);
5665 #if SIZEOF_REGISTER == 8
5666 /* The array reg is 64 bits but the index reg is only 32 */
5667 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5669 index_reg = args [1]->dreg;
5671 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5673 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + offset into one LEA. */
5674 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5675 add_reg = ins->dreg;
5676 /* Avoid a warning */
5678 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5682 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5683 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5684 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5686 type_from_op (cfg, ins, NULL, NULL);
5688 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5689 int dreg = alloc_ireg (cfg);
5690 /* Decompose later to allow more optimizations */
5691 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5692 ins->type = STACK_I4;
5693 ins->flags |= MONO_INST_FAULT;
5694 cfg->cbb->has_array_access = TRUE;
5695 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5698 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5699 int mult_reg = alloc_preg (cfg);
5700 int add_reg = alloc_preg (cfg);
5702 /* The corlib functions check for oob already. */
5703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5704 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5706 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5709 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType(): obj->vtable->type, with a faulting load for null checks. */
5711 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5712 int dreg = alloc_ireg_ref (cfg);
5713 int vt_reg = alloc_preg (cfg);
5714 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5715 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5716 type_from_op (cfg, ins, NULL, NULL);
5719 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Non-moving GC only: hash the object address (Knuth multiplicative). */
5720 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5721 int dreg = alloc_ireg (cfg);
5722 int t1 = alloc_ireg (cfg);
5724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5725 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5726 ins->type = STACK_I4;
/* Object..ctor() does nothing: emit a NOP. */
5730 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5731 MONO_INST_NEW (cfg, ins, OP_NOP);
5732 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5736 } else if (cmethod->klass == mono_defaults.array_class) {
5737 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5738 return emit_array_generic_access (cfg, fsig, args, FALSE);
5739 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5740 return emit_array_generic_access (cfg, fsig, args, TRUE);
5742 #ifndef MONO_BIG_ARRAYS
5744 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5747 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5748 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5749 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5750 int dreg = alloc_ireg (cfg);
5751 int bounds_reg = alloc_ireg_mp (cfg);
5752 MonoBasicBlock *end_bb, *szarray_bb;
5753 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5755 NEW_BBLOCK (cfg, end_bb);
5756 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on whether the array has a bounds descriptor (NULL => szarray). */
5758 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5759 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5761 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5762 /* Non-szarray case */
5764 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5765 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5767 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5768 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5770 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length; GetLowerBound(0) is 0. */
5773 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5774 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5776 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5777 MONO_START_BB (cfg, end_bb);
5779 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5780 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with 'g' (getters). */
5786 if (cmethod->name [0] != 'g')
5789 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5790 int dreg = alloc_ireg (cfg);
5791 int vtable_reg = alloc_preg (cfg);
5792 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5793 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5794 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5795 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5796 type_from_op (cfg, ins, NULL, NULL);
5799 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5800 int dreg = alloc_ireg (cfg);
5802 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5803 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5804 type_from_op (cfg, ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5809 } else if (cmethod->klass == runtime_helpers_class) {
5811 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5812 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5816 } else if (cmethod->klass == mono_defaults.thread_class) {
5817 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5818 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5819 MONO_ADD_INS (cfg->cbb, ins);
5821 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5822 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* VolatileRead: plain load of the right width, then an acquire barrier. */
5823 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5825 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5827 if (fsig->params [0]->type == MONO_TYPE_I1)
5828 opcode = OP_LOADI1_MEMBASE;
5829 else if (fsig->params [0]->type == MONO_TYPE_U1)
5830 opcode = OP_LOADU1_MEMBASE;
5831 else if (fsig->params [0]->type == MONO_TYPE_I2)
5832 opcode = OP_LOADI2_MEMBASE;
5833 else if (fsig->params [0]->type == MONO_TYPE_U2)
5834 opcode = OP_LOADU2_MEMBASE;
5835 else if (fsig->params [0]->type == MONO_TYPE_I4)
5836 opcode = OP_LOADI4_MEMBASE;
5837 else if (fsig->params [0]->type == MONO_TYPE_U4)
5838 opcode = OP_LOADU4_MEMBASE;
5839 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5840 opcode = OP_LOADI8_MEMBASE;
5841 else if (fsig->params [0]->type == MONO_TYPE_R4)
5842 opcode = OP_LOADR4_MEMBASE;
5843 else if (fsig->params [0]->type == MONO_TYPE_R8)
5844 opcode = OP_LOADR8_MEMBASE;
5845 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5846 opcode = OP_LOAD_MEMBASE;
5849 MONO_INST_NEW (cfg, ins, opcode);
5850 ins->inst_basereg = args [0]->dreg;
5851 ins->inst_offset = 0;
5852 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg class and stack type by parameter type (case labels elided). */
5854 switch (fsig->params [0]->type) {
5861 ins->dreg = mono_alloc_ireg (cfg);
5862 ins->type = STACK_I4;
5866 ins->dreg = mono_alloc_lreg (cfg);
5867 ins->type = STACK_I8;
5871 ins->dreg = mono_alloc_ireg (cfg);
5872 #if SIZEOF_REGISTER == 8
5873 ins->type = STACK_I8;
5875 ins->type = STACK_I4;
5880 ins->dreg = mono_alloc_freg (cfg);
5881 ins->type = STACK_R8;
5884 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5885 ins->dreg = mono_alloc_ireg_ref (cfg);
5886 ins->type = STACK_OBJ;
5890 if (opcode == OP_LOADI8_MEMBASE)
5891 ins = mono_decompose_opcode (cfg, ins, NULL);
5893 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* VolatileWrite: release barrier, then a plain store of the right width. */
5897 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5899 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5901 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5902 opcode = OP_STOREI1_MEMBASE_REG;
5903 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5904 opcode = OP_STOREI2_MEMBASE_REG;
5905 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5906 opcode = OP_STOREI4_MEMBASE_REG;
5907 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5908 opcode = OP_STOREI8_MEMBASE_REG;
5909 else if (fsig->params [0]->type == MONO_TYPE_R4)
5910 opcode = OP_STORER4_MEMBASE_REG;
5911 else if (fsig->params [0]->type == MONO_TYPE_R8)
5912 opcode = OP_STORER8_MEMBASE_REG;
5913 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5914 opcode = OP_STORE_MEMBASE_REG;
5917 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5919 MONO_INST_NEW (cfg, ins, opcode);
5920 ins->sreg1 = args [1]->dreg;
5921 ins->inst_destbasereg = args [0]->dreg;
5922 ins->inst_offset = 0;
5923 MONO_ADD_INS (cfg->cbb, ins);
5925 if (opcode == OP_STOREI8_MEMBASE_REG)
5926 ins = mono_decompose_opcode (cfg, ins, NULL);
/* ---- System.Threading.Monitor: fast-path trampoline calls ---- */
5931 } else if (cmethod->klass == mono_defaults.monitor_class) {
5932 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5933 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5936 if (COMPILE_LLVM (cfg)) {
5938 * Pass the argument normally, the LLVM backend will handle the
5939 * calling convention problems.
5941 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5943 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5944 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5945 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5946 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5949 return (MonoInst*)call;
5950 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter(object, ref bool lockTaken) variant (v4 trampoline). */
5951 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5954 if (COMPILE_LLVM (cfg)) {
5956 * Pass the argument normally, the LLVM backend will handle the
5957 * calling convention problems.
5959 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5961 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5962 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5963 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5964 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5967 return (MonoInst*)call;
5969 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5972 if (COMPILE_LLVM (cfg)) {
5973 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5975 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5976 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5977 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5978 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5981 return (MonoInst*)call;
/* ---- System.Threading.Interlocked ---- */
5984 } else if (cmethod->klass->image == mono_defaults.corlib &&
5985 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5986 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5989 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): atomic load, or barriered plain load. */
5990 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5991 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5992 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5993 ins->dreg = mono_alloc_preg (cfg);
5994 ins->sreg1 = args [0]->dreg;
5995 ins->type = STACK_I8;
5996 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5997 MONO_ADD_INS (cfg->cbb, ins);
6001 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6003 /* 64 bit reads are already atomic */
6004 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6005 load_ins->dreg = mono_alloc_preg (cfg);
6006 load_ins->inst_basereg = args [0]->dreg;
6007 load_ins->inst_offset = 0;
6008 load_ins->type = STACK_I8;
6009 MONO_ADD_INS (cfg->cbb, load_ins);
6011 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1. */
6018 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6019 MonoInst *ins_iconst;
6022 if (fsig->params [0]->type == MONO_TYPE_I4) {
6023 opcode = OP_ATOMIC_ADD_I4;
6024 cfg->has_atomic_add_i4 = TRUE;
6026 #if SIZEOF_REGISTER == 8
6027 else if (fsig->params [0]->type == MONO_TYPE_I8)
6028 opcode = OP_ATOMIC_ADD_I8;
6031 if (!mono_arch_opcode_supported (opcode))
6033 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6034 ins_iconst->inst_c0 = 1;
6035 ins_iconst->dreg = mono_alloc_ireg (cfg);
6036 MONO_ADD_INS (cfg->cbb, ins_iconst);
6038 MONO_INST_NEW (cfg, ins, opcode);
6039 ins->dreg = mono_alloc_ireg (cfg);
6040 ins->inst_basereg = args [0]->dreg;
6041 ins->inst_offset = 0;
6042 ins->sreg2 = ins_iconst->dreg;
6043 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6044 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1. */
6046 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6047 MonoInst *ins_iconst;
6050 if (fsig->params [0]->type == MONO_TYPE_I4) {
6051 opcode = OP_ATOMIC_ADD_I4;
6052 cfg->has_atomic_add_i4 = TRUE;
6054 #if SIZEOF_REGISTER == 8
6055 else if (fsig->params [0]->type == MONO_TYPE_I8)
6056 opcode = OP_ATOMIC_ADD_I8;
6059 if (!mono_arch_opcode_supported (opcode))
6061 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6062 ins_iconst->inst_c0 = -1;
6063 ins_iconst->dreg = mono_alloc_ireg (cfg);
6064 MONO_ADD_INS (cfg->cbb, ins_iconst);
6066 MONO_INST_NEW (cfg, ins, opcode);
6067 ins->dreg = mono_alloc_ireg (cfg);
6068 ins->inst_basereg = args [0]->dreg;
6069 ins->inst_offset = 0;
6070 ins->sreg2 = ins_iconst->dreg;
6071 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6072 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the second argument. */
6074 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6077 if (fsig->params [0]->type == MONO_TYPE_I4) {
6078 opcode = OP_ATOMIC_ADD_I4;
6079 cfg->has_atomic_add_i4 = TRUE;
6081 #if SIZEOF_REGISTER == 8
6082 else if (fsig->params [0]->type == MONO_TYPE_I8)
6083 opcode = OP_ATOMIC_ADD_I8;
6086 if (!mono_arch_opcode_supported (opcode))
6088 MONO_INST_NEW (cfg, ins, opcode);
6089 ins->dreg = mono_alloc_ireg (cfg);
6090 ins->inst_basereg = args [0]->dreg;
6091 ins->inst_offset = 0;
6092 ins->sreg2 = args [1]->dreg;
6093 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6094 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic swap; floats are routed through int regs via F_TO_I/I_TO_F moves. */
6097 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6098 MonoInst *f2i = NULL, *i2f;
6099 guint32 opcode, f2i_opcode, i2f_opcode;
6100 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6101 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6103 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6104 fsig->params [0]->type == MONO_TYPE_R4) {
6105 opcode = OP_ATOMIC_EXCHANGE_I4;
6106 f2i_opcode = OP_MOVE_F_TO_I4;
6107 i2f_opcode = OP_MOVE_I4_TO_F;
6108 cfg->has_atomic_exchange_i4 = TRUE;
6110 #if SIZEOF_REGISTER == 8
6112 fsig->params [0]->type == MONO_TYPE_I8 ||
6113 fsig->params [0]->type == MONO_TYPE_R8 ||
6114 fsig->params [0]->type == MONO_TYPE_I) {
6115 opcode = OP_ATOMIC_EXCHANGE_I8;
6116 f2i_opcode = OP_MOVE_F_TO_I8;
6117 i2f_opcode = OP_MOVE_I8_TO_F;
6120 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6121 opcode = OP_ATOMIC_EXCHANGE_I4;
6122 cfg->has_atomic_exchange_i4 = TRUE;
6128 if (!mono_arch_opcode_supported (opcode))
6132 /* TODO: Decompose these opcodes instead of bailing here. */
6133 if (COMPILE_SOFT_FLOAT (cfg))
6136 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6137 f2i->dreg = mono_alloc_ireg (cfg);
6138 f2i->sreg1 = args [1]->dreg;
6139 if (f2i_opcode == OP_MOVE_F_TO_I4)
6140 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6141 MONO_ADD_INS (cfg->cbb, f2i);
6144 MONO_INST_NEW (cfg, ins, opcode);
6145 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6146 ins->inst_basereg = args [0]->dreg;
6147 ins->inst_offset = 0;
6148 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6149 MONO_ADD_INS (cfg->cbb, ins);
6151 switch (fsig->params [0]->type) {
6153 ins->type = STACK_I4;
6156 ins->type = STACK_I8;
6159 #if SIZEOF_REGISTER == 8
6160 ins->type = STACK_I8;
6162 ins->type = STACK_I4;
6167 ins->type = STACK_R8;
6170 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6171 ins->type = STACK_OBJ;
6176 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6177 i2f->dreg = mono_alloc_freg (cfg);
6178 i2f->sreg1 = ins->dreg;
6179 i2f->type = STACK_R8;
6180 if (i2f_opcode == OP_MOVE_I4_TO_F)
6181 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6182 MONO_ADD_INS (cfg->cbb, i2f);
/* Exchanging a reference into the heap needs a GC write barrier. */
6187 if (cfg->gen_write_barriers && is_ref)
6188 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref T, T, T): atomic CAS; same float-through-int trick. */
6190 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6191 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6192 guint32 opcode, f2i_opcode, i2f_opcode;
6193 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6194 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6196 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6197 fsig->params [1]->type == MONO_TYPE_R4) {
6198 opcode = OP_ATOMIC_CAS_I4;
6199 f2i_opcode = OP_MOVE_F_TO_I4;
6200 i2f_opcode = OP_MOVE_I4_TO_F;
6201 cfg->has_atomic_cas_i4 = TRUE;
6203 #if SIZEOF_REGISTER == 8
6205 fsig->params [1]->type == MONO_TYPE_I8 ||
6206 fsig->params [1]->type == MONO_TYPE_R8 ||
6207 fsig->params [1]->type == MONO_TYPE_I) {
6208 opcode = OP_ATOMIC_CAS_I8;
6209 f2i_opcode = OP_MOVE_F_TO_I8;
6210 i2f_opcode = OP_MOVE_I8_TO_F;
6213 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6214 opcode = OP_ATOMIC_CAS_I4;
6215 cfg->has_atomic_cas_i4 = TRUE;
6221 if (!mono_arch_opcode_supported (opcode))
6225 /* TODO: Decompose these opcodes instead of bailing here. */
6226 if (COMPILE_SOFT_FLOAT (cfg))
6229 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6230 f2i_new->dreg = mono_alloc_ireg (cfg);
6231 f2i_new->sreg1 = args [1]->dreg;
6232 if (f2i_opcode == OP_MOVE_F_TO_I4)
6233 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6234 MONO_ADD_INS (cfg->cbb, f2i_new);
6236 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6237 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6238 f2i_cmp->sreg1 = args [2]->dreg;
6239 if (f2i_opcode == OP_MOVE_F_TO_I4)
6240 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6241 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6244 MONO_INST_NEW (cfg, ins, opcode);
6245 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6246 ins->sreg1 = args [0]->dreg;
6247 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6248 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6249 MONO_ADD_INS (cfg->cbb, ins);
6251 switch (fsig->params [0]->type) {
6253 ins->type = STACK_I4;
6256 ins->type = STACK_I8;
6259 #if SIZEOF_REGISTER == 8
6260 ins->type = STACK_I8;
6262 ins->type = STACK_I4;
6267 ins->type = STACK_R8;
6270 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6271 ins->type = STACK_OBJ;
6276 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6277 i2f->dreg = mono_alloc_freg (cfg);
6278 i2f->sreg1 = ins->dreg;
6279 i2f->type = STACK_R8;
6280 if (i2f_opcode == OP_MOVE_I4_TO_F)
6281 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6282 MONO_ADD_INS (cfg->cbb, i2f);
6287 if (cfg->gen_write_barriers && is_ref)
6288 emit_write_barrier (cfg, args [0], args [1]);
/* 4-arg CompareExchange(int): CAS plus an explicit success flag store. */
6290 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6291 fsig->params [1]->type == MONO_TYPE_I4) {
6292 MonoInst *cmp, *ceq;
6294 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6297 /* int32 r = CAS (location, value, comparand); */
6298 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6299 ins->dreg = alloc_ireg (cfg);
6300 ins->sreg1 = args [0]->dreg;
6301 ins->sreg2 = args [1]->dreg;
6302 ins->sreg3 = args [2]->dreg;
6303 ins->type = STACK_I4;
6304 MONO_ADD_INS (cfg->cbb, ins);
6306 /* bool result = r == comparand; */
6307 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6308 cmp->sreg1 = ins->dreg;
6309 cmp->sreg2 = args [2]->dreg;
6310 cmp->type = STACK_I4;
6311 MONO_ADD_INS (cfg->cbb, cmp);
6313 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6314 ceq->dreg = alloc_ireg (cfg);
6315 ceq->type = STACK_I4;
6316 MONO_ADD_INS (cfg->cbb, ceq);
6318 /* *success = result; */
6319 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6321 cfg->has_atomic_cas_i4 = TRUE;
6323 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6324 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* ---- System.Threading.Volatile: acquire loads / release stores ---- */
6328 } else if (cmethod->klass->image == mono_defaults.corlib &&
6329 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6330 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6333 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6335 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6336 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6338 if (fsig->params [0]->type == MONO_TYPE_I1)
6339 opcode = OP_ATOMIC_LOAD_I1;
6340 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6341 opcode = OP_ATOMIC_LOAD_U1;
6342 else if (fsig->params [0]->type == MONO_TYPE_I2)
6343 opcode = OP_ATOMIC_LOAD_I2;
6344 else if (fsig->params [0]->type == MONO_TYPE_U2)
6345 opcode = OP_ATOMIC_LOAD_U2;
6346 else if (fsig->params [0]->type == MONO_TYPE_I4)
6347 opcode = OP_ATOMIC_LOAD_I4;
6348 else if (fsig->params [0]->type == MONO_TYPE_U4)
6349 opcode = OP_ATOMIC_LOAD_U4;
6350 else if (fsig->params [0]->type == MONO_TYPE_R4)
6351 opcode = OP_ATOMIC_LOAD_R4;
6352 else if (fsig->params [0]->type == MONO_TYPE_R8)
6353 opcode = OP_ATOMIC_LOAD_R8;
6354 #if SIZEOF_REGISTER == 8
6355 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6356 opcode = OP_ATOMIC_LOAD_I8;
6357 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6358 opcode = OP_ATOMIC_LOAD_U8;
6360 else if (fsig->params [0]->type == MONO_TYPE_I)
6361 opcode = OP_ATOMIC_LOAD_I4;
6362 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6363 opcode = OP_ATOMIC_LOAD_U4;
6367 if (!mono_arch_opcode_supported (opcode))
6370 MONO_INST_NEW (cfg, ins, opcode);
6371 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6372 ins->sreg1 = args [0]->dreg;
6373 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6374 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type by parameter type (most case labels elided in excerpt). */
6376 switch (fsig->params [0]->type) {
6377 case MONO_TYPE_BOOLEAN:
6384 ins->type = STACK_I4;
6388 ins->type = STACK_I8;
6392 #if SIZEOF_REGISTER == 8
6393 ins->type = STACK_I8;
6395 ins->type = STACK_I4;
6400 ins->type = STACK_R8;
6403 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6404 ins->type = STACK_OBJ;
6410 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6412 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6414 if (fsig->params [0]->type == MONO_TYPE_I1)
6415 opcode = OP_ATOMIC_STORE_I1;
6416 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6417 opcode = OP_ATOMIC_STORE_U1;
6418 else if (fsig->params [0]->type == MONO_TYPE_I2)
6419 opcode = OP_ATOMIC_STORE_I2;
6420 else if (fsig->params [0]->type == MONO_TYPE_U2)
6421 opcode = OP_ATOMIC_STORE_U2;
6422 else if (fsig->params [0]->type == MONO_TYPE_I4)
6423 opcode = OP_ATOMIC_STORE_I4;
6424 else if (fsig->params [0]->type == MONO_TYPE_U4)
6425 opcode = OP_ATOMIC_STORE_U4;
6426 else if (fsig->params [0]->type == MONO_TYPE_R4)
6427 opcode = OP_ATOMIC_STORE_R4;
6428 else if (fsig->params [0]->type == MONO_TYPE_R8)
6429 opcode = OP_ATOMIC_STORE_R8;
6430 #if SIZEOF_REGISTER == 8
6431 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6432 opcode = OP_ATOMIC_STORE_I8;
6433 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6434 opcode = OP_ATOMIC_STORE_U8;
6436 else if (fsig->params [0]->type == MONO_TYPE_I)
6437 opcode = OP_ATOMIC_STORE_I4;
6438 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6439 opcode = OP_ATOMIC_STORE_U4;
6443 if (!mono_arch_opcode_supported (opcode))
6446 MONO_INST_NEW (cfg, ins, opcode);
6447 ins->dreg = args [0]->dreg;
6448 ins->sreg1 = args [1]->dreg;
6449 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6450 MONO_ADD_INS (cfg->cbb, ins);
6452 if (cfg->gen_write_barriers && is_ref)
6453 emit_write_barrier (cfg, args [0], args [1]);
/* ---- System.Diagnostics.Debugger ---- */
6459 } else if (cmethod->klass->image == mono_defaults.corlib &&
6460 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6461 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6462 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6463 if (should_insert_brekpoint (cfg->method)) {
6464 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6466 MONO_INST_NEW (cfg, ins, OP_NOP);
6467 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Environment ---- */
6471 } else if (cmethod->klass->image == mono_defaults.corlib &&
6472 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6473 (strcmp (cmethod->klass->name, "Environment") == 0)) {
/* Compile-time constant: 1 or 0 depending on the target (#if elided). */
6474 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6476 EMIT_NEW_ICONST (cfg, ins, 1);
6478 EMIT_NEW_ICONST (cfg, ins, 0);
6481 } else if (cmethod->klass == mono_defaults.math_class) {
6483 * There is general branchless code for Min/Max, but it does not work for
6485 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch Selector.GetHandle: fold constant selector strings ---- */
6487 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6488 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6489 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6490 !strcmp (cmethod->klass->name, "Selector")) {
6491 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* NOTE(review): this compares cmethod->klass->name, which the enclosing
 * condition already matched against "Selector"; it presumably should be
 * cmethod->name vs "GetHandle" — as written the branch can never be
 * taken.  TODO confirm against upstream and fix. */
6492 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6493 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6496 MonoJumpInfoToken *ji;
6499 cfg->disable_llvm = TRUE;
6501 if (args [0]->opcode == OP_GOT_ENTRY) {
6502 pi = args [0]->inst_p1;
6503 g_assert (pi->opcode == OP_PATCH_INFO);
6504 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6507 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6508 ji = args [0]->inst_p0;
6511 NULLIFY_INS (args [0]);
6514 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6515 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6516 ins->dreg = mono_alloc_ireg (cfg);
6518 ins->inst_p0 = mono_string_to_utf8 (s);
6519 MONO_ADD_INS (cfg->cbb, ins);
/* ---- Fallbacks: SIMD, native types, LLVM backend, then arch-specific ---- */
6525 #ifdef MONO_ARCH_SIMD_INTRINSICS
6526 if (cfg->opt & MONO_OPT_SIMD) {
6527 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6533 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6537 if (COMPILE_LLVM (cfg)) {
6538 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6543 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *   Redirect selected internal calls to managed implementations.
 *   Currently only String.InternalAllocateStr: when allocation profiling
 *   and MONO_OPT_SHARED are both off, replace it with a call to the
 *   managed string allocator, passing the String vtable and the length.
 *   NOTE(review): elided excerpt — the managed_alloc NULL check and the
 *   final fall-through return are not visible here.
 */
6547 * This entry point could be used later for arbitrary method
6550 inline static MonoInst*
6551 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6552 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6554 if (method->klass == mono_defaults.string_class) {
6555 /* managed string allocation support */
6556 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6557 MonoInst *iargs [2];
6558 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6559 MonoMethod *managed_alloc = NULL;
6561 g_assert (vtable); /*Should not fail since it System.String*/
6562 #ifndef MONO_CROSS_COMPILE
6563 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6567 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6568 iargs [1] = args [0];
6569 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Inlining support: create an OP_LOCAL variable for every argument
 *   (including the implicit 'this', whose type is taken from the stack
 *   item) and emit stores of the incoming stack values into them, so
 *   the inlined body can address its arguments as variables.
 */
6576 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6578 MonoInst *store, *temp;
6581 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6582 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6585 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6586 * would be different than the MonoInst's used to represent arguments, and
6587 * the ldelema implementation can't deal with that.
6588 * Solution: When ldelema is used on an inline argument, create a var for
6589 * it, emit ldelema on that var, and emit the saving code below in
6590 * inline_method () if needed.
6592 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6593 cfg->args [i] = temp;
6594 /* This uses cfg->args [i] which is set by the preceeding line */
6595 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6596 store->cil_code = sp [0]->cil_code;
6601 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6602 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6604 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6606 check_inline_called_method_name_limit (MonoMethod *called_method)
6609 static const char *limit = NULL;
6611 if (limit == NULL) {
6612 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6614 if (limit_string != NULL)
6615 limit = limit_string;
6620 if (limit [0] != '\0') {
6621 char *called_method_name = mono_method_full_name (called_method, TRUE);
6623 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6624 g_free (called_method_name);
6626 //return (strncmp_result <= 0);
6627 return (strncmp_result == 0);
6634 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6636 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6639 static const char *limit = NULL;
6641 if (limit == NULL) {
6642 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6643 if (limit_string != NULL) {
6644 limit = limit_string;
6650 if (limit [0] != '\0') {
6651 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6653 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6654 g_free (caller_method_name);
6656 //return (strncmp_result <= 0);
6657 return (strncmp_result == 0);
6665 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6667 static double r8_0 = 0.0;
6668 static float r4_0 = 0.0;
6672 rtype = mini_replace_type (rtype);
6676 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6677 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6678 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6679 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6680 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6681 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6682 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6683 ins->type = STACK_R4;
6684 ins->inst_p0 = (void*)&r4_0;
6686 MONO_ADD_INS (cfg->cbb, ins);
6687 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6688 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6689 ins->type = STACK_R8;
6690 ins->inst_p0 = (void*)&r8_0;
6692 MONO_ADD_INS (cfg->cbb, ins);
6693 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6694 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6695 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6696 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6697 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6699 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6704 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6708 rtype = mini_replace_type (rtype);
6712 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6713 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6714 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6715 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6716 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6717 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6718 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6719 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6720 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6721 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6722 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6723 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6724 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6725 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6727 emit_init_rvar (cfg, dreg, rtype);
6731 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6733 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6735 MonoInst *var = cfg->locals [local];
6736 if (COMPILE_SOFT_FLOAT (cfg)) {
6738 int reg = alloc_dreg (cfg, var->type);
6739 emit_init_rvar (cfg, reg, type);
6740 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6743 emit_init_rvar (cfg, var->dreg, type);
6745 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current IR at IP by recursively invoking
 * mono_method_to_ir () on its body between freshly allocated start/end
 * bblocks. Return the cost of inlining CMETHOD (0 means the inline was
 * aborted). Saves and restores all per-method compile state on CFG around
 * the recursive call. NOTE(review): several lines are elided in this
 * extraction (numeric gaps); comments below describe only the visible code.
 */
6752  * Return the cost of inlining CMETHOD.
6755 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6756 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6758 MonoInst *ins, *rvar = NULL;
6759 MonoMethodHeader *cheader;
6760 MonoBasicBlock *ebblock, *sbblock;
6762 MonoMethod *prev_inlined_method;
6763 MonoInst **prev_locals, **prev_args;
6764 MonoType **prev_arg_types;
6765 guint prev_real_offset;
6766 GHashTable *prev_cbb_hash;
6767 MonoBasicBlock **prev_cil_offset_to_bb;
6768 MonoBasicBlock *prev_cbb;
6769 unsigned char* prev_cil_start;
6770 guint32 prev_cil_offset_to_bb_len;
6771 MonoMethod *prev_current_method;
6772 MonoGenericContext *prev_generic_context;
6773 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6775 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven filters restricting which methods get inlined */
6777 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6778 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6781 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6782 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6787 fsig = mono_method_signature (cmethod);
6789 if (cfg->verbose_level > 2)
6790 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6792 if (!cmethod->inline_info) {
6793 cfg->stat_inlineable_methods++;
6794 cmethod->inline_info = 1;
6797 /* allocate local variables */
6798 cheader = mono_method_get_header (cmethod);
6800 if (cheader == NULL || mono_loader_get_last_error ()) {
6801 MonoLoaderError *error = mono_loader_get_last_error ();
6804 mono_metadata_free_mh (cheader);
6805 if (inline_always && error)
6806 mono_cfg_set_exception (cfg, error->exception_type);
6808 mono_loader_clear_error ();
6812 /*Must verify before creating locals as it can cause the JIT to assert.*/
6813 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6814 mono_metadata_free_mh (cheader);
6818 /* allocate space to store the return value */
6819 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6820 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6823 prev_locals = cfg->locals;
6824 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6825 for (i = 0; i < cheader->num_locals; ++i)
6826 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6828 /* allocate start and end blocks */
6829 /* This is needed so if the inline is aborted, we can clean up */
6830 NEW_BBLOCK (cfg, sbblock);
6831 sbblock->real_offset = real_offset;
6833 NEW_BBLOCK (cfg, ebblock);
6834 ebblock->block_num = cfg->num_bblocks++;
6835 ebblock->real_offset = real_offset;
/* Save all per-method state that the recursive IR conversion clobbers */
6837 prev_args = cfg->args;
6838 prev_arg_types = cfg->arg_types;
6839 prev_inlined_method = cfg->inlined_method;
6840 cfg->inlined_method = cmethod;
6841 cfg->ret_var_set = FALSE;
6842 cfg->inline_depth ++;
6843 prev_real_offset = cfg->real_offset;
6844 prev_cbb_hash = cfg->cbb_hash;
6845 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6846 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6847 prev_cil_start = cfg->cil_start;
6848 prev_cbb = cfg->cbb;
6849 prev_current_method = cfg->current_method;
6850 prev_generic_context = cfg->generic_context;
6851 prev_ret_var_set = cfg->ret_var_set;
6852 prev_disable_inline = cfg->disable_inline;
6854 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL to IR between sbblock and ebblock */
6857 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6859 ret_var_set = cfg->ret_var_set;
/* Restore the saved per-method state */
6861 cfg->inlined_method = prev_inlined_method;
6862 cfg->real_offset = prev_real_offset;
6863 cfg->cbb_hash = prev_cbb_hash;
6864 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6865 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6866 cfg->cil_start = prev_cil_start;
6867 cfg->locals = prev_locals;
6868 cfg->args = prev_args;
6869 cfg->arg_types = prev_arg_types;
6870 cfg->current_method = prev_current_method;
6871 cfg->generic_context = prev_generic_context;
6872 cfg->ret_var_set = prev_ret_var_set;
6873 cfg->disable_inline = prev_disable_inline;
6874 cfg->inline_depth --;
/* Accept the inline when the body was cheap enough, or unconditionally for inline_always */
6876 if ((costs >= 0 && costs < 60) || inline_always) {
6877 if (cfg->verbose_level > 2)
6878 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6880 cfg->stat_inlined_methods++;
6882 /* always add some code to avoid block split failures */
6883 MONO_INST_NEW (cfg, ins, OP_NOP);
6884 MONO_ADD_INS (prev_cbb, ins);
6886 prev_cbb->next_bb = sbblock;
6887 link_bblock (cfg, prev_cbb, sbblock);
6890  * Get rid of the begin and end bblocks if possible to aid local
6893 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6895 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6896 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6898 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6899 MonoBasicBlock *prev = ebblock->in_bb [0];
6900 mono_merge_basic_blocks (cfg, prev, ebblock);
6902 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6903 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6904 cfg->cbb = prev_cbb;
6908  * Its possible that the rvar is set in some prev bblock, but not in others.
6914 for (i = 0; i < ebblock->in_count; ++i) {
6915 bb = ebblock->in_bb [i];
6917 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6920 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6929 *out_cbb = cfg->cbb;
6933  * If the inlined method contains only a throw, then the ret var is not
6934  * set, so set it to a dummy value.
6937 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6939 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6942 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: undo and report */
6945 if (cfg->verbose_level > 2)
6946 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6947 cfg->exception_type = MONO_EXCEPTION_NONE;
6948 mono_loader_clear_error ();
6950 /* This gets rid of the newly added bblocks */
6951 cfg->cbb = prev_cbb;
6953 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6958 * Some of these comments may well be out-of-date.
6959 * Design decisions: we do a single pass over the IL code (and we do bblock
6960 * splitting/merging in the few cases when it's required: a back jump to an IL
6961 * address that was not already seen as bblock starting point).
6962 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6963 * Complex operations are decomposed in simpler ones right away. We need to let the
6964 * arch-specific code peek and poke inside this process somehow (except when the
6965 * optimizations can take advantage of the full semantic info of coarse opcodes).
6966 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6967 * MonoInst->opcode initially is the IL opcode or some simplification of that
6968 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6969 * opcode with value bigger than OP_LAST.
6970 * At this point the IR can be handed over to an interpreter, a dumb code generator
6971 * or to the optimizing code generator that will translate it to SSA form.
6973 * Profiling directed optimizations.
6974 * We may compile by default with few or no optimizations and instrument the code
6975 * or the user may indicate what methods to optimize the most either in a config file
6976 * or through repeated runs where the compiler applies offline the optimizations to
6977 * each method and then decides if it was worth it.
6980 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6981 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6982 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6983 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6984 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6985 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6986 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6987 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6989 /* offset from br.s -> br like opcodes */
6990 #define BIG_BRANCH_OFFSET 13
6993 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6995 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6997 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END once and create a basic block at every
 * branch target (and at the instruction following each branch), so the main
 * IL-to-IR pass can assume bblock boundaries are known. Also marks the
 * bblock containing a CEE_THROW as out-of-line. NOTE(review): lines are
 * elided in this extraction (numeric gaps); comments describe visible code only.
 */
7001 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7003 unsigned char *ip = start;
7004 unsigned char *target;
7007 MonoBasicBlock *bblock;
7008 const MonoOpcode *opcode;
7011 cli_addr = ip - start;
7012 i = mono_opcode_value ((const guint8 **)&ip, end);
7015 opcode = &mono_opcodes [i];
/* Advance IP by the operand size of each opcode kind */
7016 switch (opcode->argument) {
7017 case MonoInlineNone:
7020 case MonoInlineString:
7021 case MonoInlineType:
7022 case MonoInlineField:
7023 case MonoInlineMethod:
7026 case MonoShortInlineR:
7033 case MonoShortInlineVar:
7034 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction */
7037 case MonoShortInlineBrTarget:
7038 target = start + cli_addr + 2 + (signed char)ip [1];
7039 GET_BBLOCK (cfg, bblock, target);
7042 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement */
7044 case MonoInlineBrTarget:
7045 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7046 GET_BBLOCK (cfg, bblock, target);
7049 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 4-byte displacements relative to the end of the instruction */
7051 case MonoInlineSwitch: {
7052 guint32 n = read32 (ip + 1);
7055 cli_addr += 5 + 4 * n;
7056 target = start + cli_addr;
7057 GET_BBLOCK (cfg, bblock, target);
7059 for (j = 0; j < n; ++j) {
7060 target = start + cli_addr + (gint32)read32 (ip);
7061 GET_BBLOCK (cfg, bblock, target);
7071 g_assert_not_reached ();
7074 if (i == CEE_THROW) {
7075 unsigned char *bb_start = ip - 1;
7077 /* Find the start of the bblock containing the throw */
7079 while ((bb_start >= start) && !bblock) {
7080 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of line */
7084 bblock->out_of_line = 1;
7094 static inline MonoMethod *
7095 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7099 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7100 method = mono_method_get_wrapper_data (m, token);
7103 method = mono_class_inflate_generic_method_checked (method, context, &error);
7104 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7107 method = mono_get_method_full (m->klass->image, token, klass, context);
7113 static inline MonoMethod *
7114 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7116 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7118 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
7124 static inline MonoClass*
7125 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7130 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7131 klass = mono_method_get_wrapper_data (method, token);
7133 klass = mono_class_inflate_generic_class (klass, context);
7135 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7136 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7139 mono_class_init (klass);
7143 static inline MonoMethodSignature*
7144 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7146 MonoMethodSignature *fsig;
7148 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7151 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7153 fsig = mono_inflate_generic_signature (fsig, context, &error);
7155 g_assert (mono_error_ok (&error));
7158 fsig = mono_metadata_parse_signature (method->klass->image, token);
7164 * Returns TRUE if the JIT should abort inlining because "callee"
7165 * is influenced by security attributes.
7168 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7172 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7176 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7177 if (result == MONO_JIT_SECURITY_OK)
7180 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7181 /* Generate code to throw a SecurityException before the actual call/link */
7182 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7185 NEW_ICONST (cfg, args [0], 4);
7186 NEW_METHODCONST (cfg, args [1], caller);
7187 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7188 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7189 /* don't hide previous results */
7190 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7191 cfg->exception_data = result;
7199 throw_exception (void)
7201 static MonoMethod *method = NULL;
7204 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7205 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
7212 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7214 MonoMethod *thrower = throw_exception ();
7217 EMIT_NEW_PCONST (cfg, args [0], ex);
7218 mono_emit_method_call (cfg, thrower, args, NULL);
7222 * Return the original method is a wrapper is specified. We can only access
7223 * the custom attributes from the original method.
7226 get_original_method (MonoMethod *method)
7228 if (method->wrapper_type == MONO_WRAPPER_NONE)
7231 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7232 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7235 /* in other cases we need to find the original method */
7236 return mono_marshal_method_from_wrapper (method);
7240 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7241 MonoBasicBlock *bblock, unsigned char *ip)
7243 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7244 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7246 emit_throw_exception (cfg, ex);
7250 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7251 MonoBasicBlock *bblock, unsigned char *ip)
7253 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7254 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7256 emit_throw_exception (cfg, ex);
7260 * Check that the IL instructions at ip are the array initialization
7261 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at IP are the RuntimeHelpers::
 * InitializeArray sequence (dup; ldtoken <field>; call InitializeArray) and,
 * if so, return a pointer to the field's raw data (or its RVA for AOT) and
 * its size, so the array init can be turned into a memcpy. NOTE(review):
 * lines are elided in this extraction (numeric gaps); comments describe the
 * visible code only.
 */
7264 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7267  * newarr[System.Int32]
7269  * ldtoken field valuetype ...
7270  * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern match: dup; ldtoken (table 0x04 = Field); call */
7272 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7274 guint32 token = read32 (ip + 7);
7275 guint32 field_token = read32 (ip + 2);
7276 guint32 field_index = field_token & 0xffffff;
7278 const char *data_ptr;
7280 MonoMethod *cmethod;
7281 MonoClass *dummy_class;
7282 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7286 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7290 *out_field_token = field_token;
7292 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize calls to the real corlib RuntimeHelpers::InitializeArray */
7295 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Determine the element size from the array element type */
7297 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7298 case MONO_TYPE_BOOLEAN:
7302 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7303 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7304 case MONO_TYPE_CHAR:
/* Data blob must be large enough for the whole array */
7321 if (size > mono_type_size (field->type, &dummy_align))
7324 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7325 if (!image_is_dynamic (method->klass->image)) {
7326 field_index = read32 (ip + 2) & 0xffffff;
7327 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7328 data_ptr = mono_image_rva_map (method->klass->image, rva);
7329 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7330 /* for aot code we do the lookup on load */
7331 if (aot && data_ptr)
7332 return GUINT_TO_POINTER (rva);
7334 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7336 data_ptr = mono_field_get_data (field);
7344 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7346 char *method_fname = mono_method_full_name (method, TRUE);
7348 MonoMethodHeader *header = mono_method_get_header (method);
7350 if (header->code_size == 0)
7351 method_code = g_strdup ("method body is empty.");
7353 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7354 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7355 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7356 g_free (method_fname);
7357 g_free (method_code);
7358 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7362 set_exception_object (MonoCompile *cfg, MonoException *exception)
7364 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7365 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7366 cfg->exception_ptr = exception;
7370 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7373 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7374 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7375 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7376 /* Optimize reg-reg moves away */
7378 * Can't optimize other opcodes, since sp[0] might point to
7379 * the last ins of a decomposed opcode.
7381 sp [0]->dreg = (cfg)->locals [n]->dreg;
7383 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7388 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so try to get rid of it in common
 * cases: when ldloca is immediately followed by initobj, emit the local
 * initialization directly and return the IP past the consumed instructions
 * (NULL when no optimization applied — NOTE(review): the fallthrough/return
 * lines are elided in this extraction; comments describe visible code only).
 */
7391 static inline unsigned char *
7392 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7402 local = read16 (ip + 2);
/* ldloca; initobj <token> within the same bblock */
7406 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7407 /* From the INITOBJ case */
7408 token = read32 (ip + 2);
7409 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7410 CHECK_TYPELOAD (klass);
7411 type = mini_replace_type (&klass->byval_arg);
7412 emit_init_local (cfg, local, type, TRUE);
7420 is_exception_class (MonoClass *class)
7423 if (class == mono_defaults.exception_class)
7425 class = class->parent;
7431 * is_jit_optimizer_disabled:
7433 * Determine whenever M's assembly has a DebuggableAttribute with the
7434 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is cached on the assembly
 * with memory barriers guarding the inited flag. NOTE(review): lines are
 * elided in this extraction (numeric gaps); comments describe visible code only.
 */
7437 is_jit_optimizer_disabled (MonoMethod *m)
7439 MonoAssembly *ass = m->klass->image->assembly;
7440 MonoCustomAttrInfo* attrs;
7441 static MonoClass *klass;
7443 gboolean val = FALSE;
/* Fast path: result already computed for this assembly */
7446 if (ass->jit_optimizer_disabled_inited)
7447 return ass->jit_optimizer_disabled;
7450 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Attribute class missing: record FALSE and publish via barrier */
7453 ass->jit_optimizer_disabled = FALSE;
7454 mono_memory_barrier ();
7455 ass->jit_optimizer_disabled_inited = TRUE;
7459 attrs = mono_custom_attrs_from_assembly (ass);
7461 for (i = 0; i < attrs->num_attrs; ++i) {
7462 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7465 MonoMethodSignature *sig;
7467 if (!attr->ctor || attr->ctor->klass != klass)
7469 /* Decode the attribute. See reflection.c */
7470 len = attr->data_size;
7471 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7472 g_assert (read16 (p) == 0x0001);
7475 // FIXME: Support named parameters
7476 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor is understood */
7477 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7479 /* Two boolean arguments */
7483 mono_custom_attrs_free (attrs);
/* Publish the computed value before setting the inited flag */
7486 ass->jit_optimizer_disabled = val;
7487 mono_memory_barrier ();
7488 ass->jit_optimizer_disabled_inited = TRUE;
7494 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7496 gboolean supported_tail_call;
7499 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7500 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7502 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7505 for (i = 0; i < fsig->param_count; ++i) {
7506 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7507 /* These can point to the current method's stack */
7508 supported_tail_call = FALSE;
7510 if (fsig->hasthis && cmethod->klass->valuetype)
7511 /* this might point to the current method's stack */
7512 supported_tail_call = FALSE;
7513 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7514 supported_tail_call = FALSE;
7515 if (cfg->method->save_lmf)
7516 supported_tail_call = FALSE;
7517 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7518 supported_tail_call = FALSE;
7519 if (call_opcode != CEE_CALL)
7520 supported_tail_call = FALSE;
7522 /* Debugging support */
7524 if (supported_tail_call) {
7525 if (!mono_debug_count ())
7526 supported_tail_call = FALSE;
7530 return supported_tail_call;
7533 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7534 * it to the thread local value based on the tls_offset field. Every other kind of access to
7535 * the field causes an assert.
7538 is_magic_tls_access (MonoClassField *field)
7540 if (strcmp (field->name, "tlsdata"))
7542 if (strcmp (field->parent->name, "ThreadLocal`1"))
7544 return field->parent->image == mono_defaults.corlib;
7547 /* emits the code needed to access a managed tls var (like ThreadStatic)
7548 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7549 * pointer for the current thread.
7550 * Returns the MonoInst* representing the address of the tls var.
7553 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7556 int static_data_reg, array_reg, dreg;
7557 int offset2_reg, idx_reg;
7558 // inlined access to the tls data
7559 // idx = (offset >> 24) - 1;
7560 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7561 static_data_reg = alloc_ireg (cfg);
7562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7563 idx_reg = alloc_ireg (cfg);
7564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7567 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7568 array_reg = alloc_ireg (cfg);
7569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7570 offset2_reg = alloc_ireg (cfg);
7571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7572 dreg = alloc_ireg (cfg);
7573 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7578 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7579 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Redirect access to ThreadLocal`1's tlsdata field to the tls var given by
 * the tls_offset field; the computed address is cached per-method in
 * CACHED_TLS_ADDR. NOTE(review): branch structure is elided in this
 * extraction (numeric gaps); comments describe visible code only.
 */
7582 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7584 MonoInst *load, *addr, *temp, *store, *thread_ins;
7585 MonoClassField *offset_field;
/* Fast path: address already computed for this method */
7587 if (*cached_tls_addr) {
7588 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7591 thread_ins = mono_get_thread_intrinsic (cfg);
7592 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7594 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7596 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: call the icall instead */
7598 MonoMethod *thread_method;
7599 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7600 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7602 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7603 addr->klass = mono_class_from_mono_type (tls_field->type);
7604 addr->type = STACK_MP;
/* Cache the address in a temp so later accesses reuse it */
7605 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7606 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7608 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7615 * Handle calls made to ctors from NEWOBJ opcodes.
7617 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes: picks between intrinsic
 * ctors, inlining, gsharedvt indirect calls, rgctx indirect calls, and a
 * plain call. REF_BBLOCK will point to the current bblock after the call.
 * Uses CHECK_TYPELOAD / CHECK_CFG_EXCEPTION / INLINE_FAILURE macros which
 * branch to labels in the enclosing function. NOTE(review): lines are elided
 * in this extraction (numeric gaps); comments describe visible code only.
 */
7620 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7621 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7623 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7624 MonoBasicBlock *bblock = *ref_bblock;
/* Shared generic valuetype ctors need an rgctx/vtable argument */
7626 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7627 mono_method_is_generic_sharable (cmethod, TRUE)) {
7628 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7629 mono_class_vtable (cfg->domain, cmethod->klass);
7630 CHECK_TYPELOAD (cmethod->klass);
7632 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7633 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7636 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7637 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7639 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7641 CHECK_TYPELOAD (cmethod->klass);
7642 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7647 /* Avoid virtual calls to ctors if possible */
7648 if (mono_class_is_marshalbyref (cmethod->klass))
7649 callvirt_this_arg = sp [0];
/* 1) intrinsic ctor implementation */
7651 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7652 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7653 CHECK_CFG_EXCEPTION;
/* 2) try inlining the ctor body */
7654 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7655 mono_method_check_inlining (cfg, cmethod) &&
7656 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7659 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7660 cfg->real_offset += 5;
7662 *inline_costs += costs - 5;
7663 *ref_bblock = bblock;
7665 INLINE_FAILURE ("inline failure");
7666 // FIXME-VT: Clean this up
7667 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7668 GSHAREDVT_FAILURE(*ip);
7669 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: go through the out trampoline with an indirect call */
7671 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7674 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7675 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) context-dependent target: fetch the code address from the rgctx */
7676 } else if (context_used &&
7677 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7678 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7679 MonoInst *cmethod_addr;
7681 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7683 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7684 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7686 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) fallback: ordinary direct call */
7688 INLINE_FAILURE ("ctor call");
7689 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7690 callvirt_this_arg, NULL, vtable_arg);
7697 * mono_method_to_ir:
7699 * Translate the .net IL into linear IR.
7702 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7703 MonoInst *return_var, MonoInst **inline_args,
7704 guint inline_offset, gboolean is_virtual_call)
7707 MonoInst *ins, **sp, **stack_start;
7708 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7709 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7710 MonoMethod *cmethod, *method_definition;
7711 MonoInst **arg_array;
7712 MonoMethodHeader *header;
7714 guint32 token, ins_flag;
7716 MonoClass *constrained_call = NULL;
7717 unsigned char *ip, *end, *target, *err_pos;
7718 MonoMethodSignature *sig;
7719 MonoGenericContext *generic_context = NULL;
7720 MonoGenericContainer *generic_container = NULL;
7721 MonoType **param_types;
7722 int i, n, start_new_bblock, dreg;
7723 int num_calls = 0, inline_costs = 0;
7724 int breakpoint_id = 0;
7726 MonoBoolean security, pinvoke;
7727 MonoSecurityManager* secman = NULL;
7728 MonoDeclSecurityActions actions;
7729 GSList *class_inits = NULL;
7730 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7732 gboolean init_locals, seq_points, skip_dead_blocks;
7733 gboolean sym_seq_points = FALSE;
7734 MonoInst *cached_tls_addr = NULL;
7735 MonoDebugMethodInfo *minfo;
7736 MonoBitSet *seq_point_locs = NULL;
7737 MonoBitSet *seq_point_set_locs = NULL;
7739 cfg->disable_inline = is_jit_optimizer_disabled (method);
7741 /* serialization and xdomain stuff may need access to private fields and methods */
7742 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7743 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7744 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7745 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7746 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7747 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7749 dont_verify |= mono_security_smcs_hack_enabled ();
7751 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7752 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7753 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7754 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7755 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7757 image = method->klass->image;
7758 header = mono_method_get_header (method);
7760 MonoLoaderError *error;
7762 if ((error = mono_loader_get_last_error ())) {
7763 mono_cfg_set_exception (cfg, error->exception_type);
7765 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7766 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7768 goto exception_exit;
7770 generic_container = mono_method_get_generic_container (method);
7771 sig = mono_method_signature (method);
7772 num_args = sig->hasthis + sig->param_count;
7773 ip = (unsigned char*)header->code;
7774 cfg->cil_start = ip;
7775 end = ip + header->code_size;
7776 cfg->stat_cil_code_size += header->code_size;
7778 seq_points = cfg->gen_seq_points && cfg->method == method;
7779 #ifdef PLATFORM_ANDROID
7780 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7783 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7784 /* We could hit a seq point before attaching to the JIT (#8338) */
7788 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7789 minfo = mono_debug_lookup_method (method);
7791 int i, n_il_offsets;
7795 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7796 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7797 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7798 sym_seq_points = TRUE;
7799 for (i = 0; i < n_il_offsets; ++i) {
7800 if (il_offsets [i] < header->code_size)
7801 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7803 g_free (il_offsets);
7804 g_free (line_numbers);
7805 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7806 /* Methods without line number info like auto-generated property accessors */
7807 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7808 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7809 sym_seq_points = TRUE;
7814 * Methods without init_locals set could cause asserts in various passes
7815 * (#497220). To work around this, we emit dummy initialization opcodes
7816 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7817 * on some platforms.
7819 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7820 init_locals = header->init_locals;
7824 method_definition = method;
7825 while (method_definition->is_inflated) {
7826 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7827 method_definition = imethod->declaring;
7830 /* SkipVerification is not allowed if core-clr is enabled */
7831 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7833 dont_verify_stloc = TRUE;
7836 if (sig->is_inflated)
7837 generic_context = mono_method_get_context (method);
7838 else if (generic_container)
7839 generic_context = &generic_container->context;
7840 cfg->generic_context = generic_context;
7842 if (!cfg->generic_sharing_context)
7843 g_assert (!sig->has_type_parameters);
7845 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7846 g_assert (method->is_inflated);
7847 g_assert (mono_method_get_context (method)->method_inst);
7849 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7850 g_assert (sig->generic_param_count);
7852 if (cfg->method == method) {
7853 cfg->real_offset = 0;
7855 cfg->real_offset = inline_offset;
7858 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7859 cfg->cil_offset_to_bb_len = header->code_size;
7861 cfg->current_method = method;
7863 if (cfg->verbose_level > 2)
7864 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7866 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7868 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7869 for (n = 0; n < sig->param_count; ++n)
7870 param_types [n + sig->hasthis] = sig->params [n];
7871 cfg->arg_types = param_types;
7873 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7874 if (cfg->method == method) {
7876 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7877 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7880 NEW_BBLOCK (cfg, start_bblock);
7881 cfg->bb_entry = start_bblock;
7882 start_bblock->cil_code = NULL;
7883 start_bblock->cil_length = 0;
7884 #if defined(__native_client_codegen__)
7885 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7886 ins->dreg = alloc_dreg (cfg, STACK_I4);
7887 MONO_ADD_INS (start_bblock, ins);
7891 NEW_BBLOCK (cfg, end_bblock);
7892 cfg->bb_exit = end_bblock;
7893 end_bblock->cil_code = NULL;
7894 end_bblock->cil_length = 0;
7895 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7896 g_assert (cfg->num_bblocks == 2);
7898 arg_array = cfg->args;
7900 if (header->num_clauses) {
7901 cfg->spvars = g_hash_table_new (NULL, NULL);
7902 cfg->exvars = g_hash_table_new (NULL, NULL);
7904 /* handle exception clauses */
7905 for (i = 0; i < header->num_clauses; ++i) {
7906 MonoBasicBlock *try_bb;
7907 MonoExceptionClause *clause = &header->clauses [i];
7908 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7909 try_bb->real_offset = clause->try_offset;
7910 try_bb->try_start = TRUE;
7911 try_bb->region = ((i + 1) << 8) | clause->flags;
7912 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7913 tblock->real_offset = clause->handler_offset;
7914 tblock->flags |= BB_EXCEPTION_HANDLER;
7917 * Linking the try block with the EH block hinders inlining as we won't be able to
7918 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7920 if (COMPILE_LLVM (cfg))
7921 link_bblock (cfg, try_bb, tblock);
7923 if (*(ip + clause->handler_offset) == CEE_POP)
7924 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7926 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7927 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7928 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7929 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7930 MONO_ADD_INS (tblock, ins);
7932 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7933 /* finally clauses already have a seq point */
7934 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7935 MONO_ADD_INS (tblock, ins);
7938 /* todo: is a fault block unsafe to optimize? */
7939 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7940 tblock->flags |= BB_EXCEPTION_UNSAFE;
7944 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7946 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7948 /* catch and filter blocks get the exception object on the stack */
7949 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7950 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7951 MonoInst *dummy_use;
7953 /* mostly like handle_stack_args (), but just sets the input args */
7954 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7955 tblock->in_scount = 1;
7956 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7957 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7960 * Add a dummy use for the exvar so its liveness info will be
7964 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7966 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7967 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7968 tblock->flags |= BB_EXCEPTION_HANDLER;
7969 tblock->real_offset = clause->data.filter_offset;
7970 tblock->in_scount = 1;
7971 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7972 /* The filter block shares the exvar with the handler block */
7973 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7974 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7975 MONO_ADD_INS (tblock, ins);
7979 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7980 clause->data.catch_class &&
7981 cfg->generic_sharing_context &&
7982 mono_class_check_context_used (clause->data.catch_class)) {
7984 * In shared generic code with catch
7985 * clauses containing type variables
7986 * the exception handling code has to
7987 * be able to get to the rgctx.
7988 * Therefore we have to make sure that
7989 * the vtable/mrgctx argument (for
7990 * static or generic methods) or the
7991 * "this" argument (for non-static
7992 * methods) are live.
7994 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7995 mini_method_get_context (method)->method_inst ||
7996 method->klass->valuetype) {
7997 mono_get_vtable_var (cfg);
7999 MonoInst *dummy_use;
8001 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8006 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8007 cfg->cbb = start_bblock;
8008 cfg->args = arg_array;
8009 mono_save_args (cfg, sig, inline_args);
8012 /* FIRST CODE BLOCK */
8013 NEW_BBLOCK (cfg, bblock);
8014 bblock->cil_code = ip;
8018 ADD_BBLOCK (cfg, bblock);
8020 if (cfg->method == method) {
8021 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8022 if (breakpoint_id) {
8023 MONO_INST_NEW (cfg, ins, OP_BREAK);
8024 MONO_ADD_INS (bblock, ins);
8028 if (mono_security_cas_enabled ())
8029 secman = mono_security_manager_get_methods ();
8031 security = (secman && mono_security_method_has_declsec (method));
8032 /* at this point having security doesn't mean we have any code to generate */
8033 if (security && (cfg->method == method)) {
8034 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8035 * And we do not want to enter the next section (with allocation) if we
8036 * have nothing to generate */
8037 security = mono_declsec_get_demands (method, &actions);
8040 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8041 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8043 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8044 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8045 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8047 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8048 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8052 mono_custom_attrs_free (custom);
8055 custom = mono_custom_attrs_from_class (wrapped->klass);
8056 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8060 mono_custom_attrs_free (custom);
8063 /* not a P/Invoke after all */
8068 /* we use a separate basic block for the initialization code */
8069 NEW_BBLOCK (cfg, init_localsbb);
8070 cfg->bb_init = init_localsbb;
8071 init_localsbb->real_offset = cfg->real_offset;
8072 start_bblock->next_bb = init_localsbb;
8073 init_localsbb->next_bb = bblock;
8074 link_bblock (cfg, start_bblock, init_localsbb);
8075 link_bblock (cfg, init_localsbb, bblock);
8077 cfg->cbb = init_localsbb;
8079 if (cfg->gsharedvt && cfg->method == method) {
8080 MonoGSharedVtMethodInfo *info;
8081 MonoInst *var, *locals_var;
8084 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8085 info->method = cfg->method;
8086 info->count_entries = 16;
8087 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8088 cfg->gsharedvt_info = info;
8090 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8091 /* prevent it from being register allocated */
8092 //var->flags |= MONO_INST_VOLATILE;
8093 cfg->gsharedvt_info_var = var;
8095 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8096 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8098 /* Allocate locals */
8099 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8100 /* prevent it from being register allocated */
8101 //locals_var->flags |= MONO_INST_VOLATILE;
8102 cfg->gsharedvt_locals_var = locals_var;
8104 dreg = alloc_ireg (cfg);
8105 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8107 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8108 ins->dreg = locals_var->dreg;
8110 MONO_ADD_INS (cfg->cbb, ins);
8111 cfg->gsharedvt_locals_var_ins = ins;
8113 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8116 ins->flags |= MONO_INST_INIT;
8120 /* at this point we know, if security is TRUE, that some code needs to be generated */
8121 if (security && (cfg->method == method)) {
8124 cfg->stat_cas_demand_generation++;
8126 if (actions.demand.blob) {
8127 /* Add code for SecurityAction.Demand */
8128 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8129 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8130 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8131 mono_emit_method_call (cfg, secman->demand, args, NULL);
8133 if (actions.noncasdemand.blob) {
8134 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8135 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8136 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8137 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8138 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8139 mono_emit_method_call (cfg, secman->demand, args, NULL);
8141 if (actions.demandchoice.blob) {
8142 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8143 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8144 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8145 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8146 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8150 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8152 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8155 if (mono_security_core_clr_enabled ()) {
8156 /* check if this is native code, e.g. an icall or a p/invoke */
8157 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8158 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8160 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8161 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8163 /* if this ia a native call then it can only be JITted from platform code */
8164 if ((icall || pinvk) && method->klass && method->klass->image) {
8165 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8166 MonoException *ex = icall ? mono_get_exception_security () :
8167 mono_get_exception_method_access ();
8168 emit_throw_exception (cfg, ex);
8175 CHECK_CFG_EXCEPTION;
8177 if (header->code_size == 0)
8180 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8185 if (cfg->method == method)
8186 mono_debug_init_method (cfg, bblock, breakpoint_id);
8188 for (n = 0; n < header->num_locals; ++n) {
8189 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8194 /* We force the vtable variable here for all shared methods
8195 for the possibility that they might show up in a stack
8196 trace where their exact instantiation is needed. */
8197 if (cfg->generic_sharing_context && method == cfg->method) {
8198 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8199 mini_method_get_context (method)->method_inst ||
8200 method->klass->valuetype) {
8201 mono_get_vtable_var (cfg);
8203 /* FIXME: Is there a better way to do this?
8204 We need the variable live for the duration
8205 of the whole method. */
8206 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8210 /* add a check for this != NULL to inlined methods */
8211 if (is_virtual_call) {
8214 NEW_ARGLOAD (cfg, arg_ins, 0);
8215 MONO_ADD_INS (cfg->cbb, arg_ins);
8216 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8219 skip_dead_blocks = !dont_verify;
8220 if (skip_dead_blocks) {
8221 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8226 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8227 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8230 start_new_bblock = 0;
8233 if (cfg->method == method)
8234 cfg->real_offset = ip - header->code;
8236 cfg->real_offset = inline_offset;
8241 if (start_new_bblock) {
8242 bblock->cil_length = ip - bblock->cil_code;
8243 if (start_new_bblock == 2) {
8244 g_assert (ip == tblock->cil_code);
8246 GET_BBLOCK (cfg, tblock, ip);
8248 bblock->next_bb = tblock;
8251 start_new_bblock = 0;
8252 for (i = 0; i < bblock->in_scount; ++i) {
8253 if (cfg->verbose_level > 3)
8254 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8255 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8259 g_slist_free (class_inits);
8262 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8263 link_bblock (cfg, bblock, tblock);
8264 if (sp != stack_start) {
8265 handle_stack_args (cfg, stack_start, sp - stack_start);
8267 CHECK_UNVERIFIABLE (cfg);
8269 bblock->next_bb = tblock;
8272 for (i = 0; i < bblock->in_scount; ++i) {
8273 if (cfg->verbose_level > 3)
8274 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8275 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8278 g_slist_free (class_inits);
8283 if (skip_dead_blocks) {
8284 int ip_offset = ip - header->code;
8286 if (ip_offset == bb->end)
8290 int op_size = mono_opcode_size (ip, end);
8291 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8293 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8295 if (ip_offset + op_size == bb->end) {
8296 MONO_INST_NEW (cfg, ins, OP_NOP);
8297 MONO_ADD_INS (bblock, ins);
8298 start_new_bblock = 1;
8306 * Sequence points are points where the debugger can place a breakpoint.
8307 * Currently, we generate these automatically at points where the IL
8310 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8312 * Make methods interruptable at the beginning, and at the targets of
8313 * backward branches.
8314 * Also, do this at the start of every bblock in methods with clauses too,
8315 * to be able to handle instructions with inprecise control flow like
8317 * Backward branches are handled at the end of method-to-ir ().
8319 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8321 /* Avoid sequence points on empty IL like .volatile */
8322 // FIXME: Enable this
8323 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8324 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8325 if (sp != stack_start)
8326 ins->flags |= MONO_INST_NONEMPTY_STACK;
8327 MONO_ADD_INS (cfg->cbb, ins);
8330 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8333 bblock->real_offset = cfg->real_offset;
8335 if ((cfg->method == method) && cfg->coverage_info) {
8336 guint32 cil_offset = ip - header->code;
8337 cfg->coverage_info->data [cil_offset].cil_code = ip;
8339 /* TODO: Use an increment here */
8340 #if defined(TARGET_X86)
8341 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8342 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8344 MONO_ADD_INS (cfg->cbb, ins);
8346 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8347 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8351 if (cfg->verbose_level > 3)
8352 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8356 if (seq_points && !sym_seq_points && sp != stack_start) {
8358 * The C# compiler uses these nops to notify the JIT that it should
8359 * insert seq points.
8361 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8362 MONO_ADD_INS (cfg->cbb, ins);
8364 if (cfg->keep_cil_nops)
8365 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8367 MONO_INST_NEW (cfg, ins, OP_NOP);
8369 MONO_ADD_INS (bblock, ins);
8372 if (should_insert_brekpoint (cfg->method)) {
8373 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8375 MONO_INST_NEW (cfg, ins, OP_NOP);
8378 MONO_ADD_INS (bblock, ins);
8384 CHECK_STACK_OVF (1);
8385 n = (*ip)-CEE_LDARG_0;
8387 EMIT_NEW_ARGLOAD (cfg, ins, n);
8395 CHECK_STACK_OVF (1);
8396 n = (*ip)-CEE_LDLOC_0;
8398 EMIT_NEW_LOCLOAD (cfg, ins, n);
8407 n = (*ip)-CEE_STLOC_0;
8410 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8412 emit_stloc_ir (cfg, sp, header, n);
8419 CHECK_STACK_OVF (1);
8422 EMIT_NEW_ARGLOAD (cfg, ins, n);
8428 CHECK_STACK_OVF (1);
8431 NEW_ARGLOADA (cfg, ins, n);
8432 MONO_ADD_INS (cfg->cbb, ins);
8442 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8444 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8449 CHECK_STACK_OVF (1);
8452 EMIT_NEW_LOCLOAD (cfg, ins, n);
8456 case CEE_LDLOCA_S: {
8457 unsigned char *tmp_ip;
8459 CHECK_STACK_OVF (1);
8460 CHECK_LOCAL (ip [1]);
8462 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8468 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8477 CHECK_LOCAL (ip [1]);
8478 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8480 emit_stloc_ir (cfg, sp, header, ip [1]);
8485 CHECK_STACK_OVF (1);
8486 EMIT_NEW_PCONST (cfg, ins, NULL);
8487 ins->type = STACK_OBJ;
8492 CHECK_STACK_OVF (1);
8493 EMIT_NEW_ICONST (cfg, ins, -1);
8506 CHECK_STACK_OVF (1);
8507 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8513 CHECK_STACK_OVF (1);
8515 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8521 CHECK_STACK_OVF (1);
8522 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8528 CHECK_STACK_OVF (1);
8529 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8530 ins->type = STACK_I8;
8531 ins->dreg = alloc_dreg (cfg, STACK_I8);
8533 ins->inst_l = (gint64)read64 (ip);
8534 MONO_ADD_INS (bblock, ins);
8540 gboolean use_aotconst = FALSE;
8542 #ifdef TARGET_POWERPC
8543 /* FIXME: Clean this up */
8544 if (cfg->compile_aot)
8545 use_aotconst = TRUE;
8548 /* FIXME: we should really allocate this only late in the compilation process */
8549 f = mono_domain_alloc (cfg->domain, sizeof (float));
8551 CHECK_STACK_OVF (1);
8557 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8559 dreg = alloc_freg (cfg);
8560 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8561 ins->type = cfg->r4_stack_type;
8563 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8564 ins->type = cfg->r4_stack_type;
8565 ins->dreg = alloc_dreg (cfg, STACK_R8);
8567 MONO_ADD_INS (bblock, ins);
8577 gboolean use_aotconst = FALSE;
8579 #ifdef TARGET_POWERPC
8580 /* FIXME: Clean this up */
8581 if (cfg->compile_aot)
8582 use_aotconst = TRUE;
8585 /* FIXME: we should really allocate this only late in the compilation process */
8586 d = mono_domain_alloc (cfg->domain, sizeof (double));
8588 CHECK_STACK_OVF (1);
8594 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8596 dreg = alloc_freg (cfg);
8597 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8598 ins->type = STACK_R8;
8600 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8601 ins->type = STACK_R8;
8602 ins->dreg = alloc_dreg (cfg, STACK_R8);
8604 MONO_ADD_INS (bblock, ins);
8613 MonoInst *temp, *store;
8615 CHECK_STACK_OVF (1);
8619 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8620 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8622 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8625 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8638 if (sp [0]->type == STACK_R8)
8639 /* we need to pop the value from the x86 FP stack */
8640 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8646 INLINE_FAILURE ("jmp");
8647 GSHAREDVT_FAILURE (*ip);
8650 if (stack_start != sp)
8652 token = read32 (ip + 1);
8653 /* FIXME: check the signature matches */
8654 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8656 if (!cmethod || mono_loader_get_last_error ())
8659 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8660 GENERIC_SHARING_FAILURE (CEE_JMP);
8662 if (mono_security_cas_enabled ())
8663 CHECK_CFG_EXCEPTION;
8665 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8667 if (ARCH_HAVE_OP_TAIL_CALL) {
8668 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8671 /* Handle tail calls similarly to calls */
8672 n = fsig->param_count + fsig->hasthis;
8676 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8677 call->method = cmethod;
8678 call->tail_call = TRUE;
8679 call->signature = mono_method_signature (cmethod);
8680 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8681 call->inst.inst_p0 = cmethod;
8682 for (i = 0; i < n; ++i)
8683 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8685 mono_arch_emit_call (cfg, call);
8686 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8687 MONO_ADD_INS (bblock, (MonoInst*)call);
8689 for (i = 0; i < num_args; ++i)
8690 /* Prevent arguments from being optimized away */
8691 arg_array [i]->flags |= MONO_INST_VOLATILE;
8693 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8694 ins = (MonoInst*)call;
8695 ins->inst_p0 = cmethod;
8696 MONO_ADD_INS (bblock, ins);
8700 start_new_bblock = 1;
8705 case CEE_CALLVIRT: {
8706 MonoInst *addr = NULL;
8707 MonoMethodSignature *fsig = NULL;
8709 int virtual = *ip == CEE_CALLVIRT;
8710 int calli = *ip == CEE_CALLI;
8711 gboolean pass_imt_from_rgctx = FALSE;
8712 MonoInst *imt_arg = NULL;
8713 MonoInst *keep_this_alive = NULL;
8714 gboolean pass_vtable = FALSE;
8715 gboolean pass_mrgctx = FALSE;
8716 MonoInst *vtable_arg = NULL;
8717 gboolean check_this = FALSE;
8718 gboolean supported_tail_call = FALSE;
8719 gboolean tail_call = FALSE;
8720 gboolean need_seq_point = FALSE;
8721 guint32 call_opcode = *ip;
8722 gboolean emit_widen = TRUE;
8723 gboolean push_res = TRUE;
8724 gboolean skip_ret = FALSE;
8725 gboolean delegate_invoke = FALSE;
8728 token = read32 (ip + 1);
8733 //GSHAREDVT_FAILURE (*ip);
8738 fsig = mini_get_signature (method, token, generic_context);
8739 n = fsig->param_count + fsig->hasthis;
8741 if (method->dynamic && fsig->pinvoke) {
8745 * This is a call through a function pointer using a pinvoke
8746 * signature. Have to create a wrapper and call that instead.
8747 * FIXME: This is very slow, need to create a wrapper at JIT time
8748 * instead based on the signature.
8750 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8751 EMIT_NEW_PCONST (cfg, args [1], fsig);
8753 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8756 MonoMethod *cil_method;
8758 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8759 cil_method = cmethod;
8761 if (constrained_call) {
8762 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8763 if (cfg->verbose_level > 2)
8764 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8765 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8766 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8767 cfg->generic_sharing_context)) {
8768 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8772 if (cfg->verbose_level > 2)
8773 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8775 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8777 * This is needed since get_method_constrained can't find
8778 * the method in klass representing a type var.
8779 * The type var is guaranteed to be a reference type in this
8782 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8783 g_assert (!cmethod->klass->valuetype);
8785 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8791 if (!cmethod || mono_loader_get_last_error ())
8793 if (!dont_verify && !cfg->skip_visibility) {
8794 MonoMethod *target_method = cil_method;
8795 if (method->is_inflated) {
8796 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8798 if (!mono_method_can_access_method (method_definition, target_method) &&
8799 !mono_method_can_access_method (method, cil_method))
8800 METHOD_ACCESS_FAILURE (method, cil_method);
8803 if (mono_security_core_clr_enabled ())
8804 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8806 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8807 /* MS.NET seems to silently convert this to a callvirt */
8812 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8813 * converts to a callvirt.
8815 * tests/bug-515884.il is an example of this behavior
8817 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8818 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8819 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8823 if (!cmethod->klass->inited)
8824 if (!mono_class_init (cmethod->klass))
8825 TYPE_LOAD_ERROR (cmethod->klass);
8827 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8828 mini_class_is_system_array (cmethod->klass)) {
8829 array_rank = cmethod->klass->rank;
8830 fsig = mono_method_signature (cmethod);
8832 fsig = mono_method_signature (cmethod);
8837 if (fsig->pinvoke) {
8838 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8839 check_for_pending_exc, cfg->compile_aot);
8840 fsig = mono_method_signature (wrapper);
8841 } else if (constrained_call) {
8842 fsig = mono_method_signature (cmethod);
8844 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8849 mono_save_token_info (cfg, image, token, cil_method);
8851 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8852 need_seq_point = TRUE;
8854 n = fsig->param_count + fsig->hasthis;
8856 /* Don't support calls made using type arguments for now */
8858 if (cfg->gsharedvt) {
8859 if (mini_is_gsharedvt_signature (cfg, fsig))
8860 GSHAREDVT_FAILURE (*ip);
8864 if (mono_security_cas_enabled ()) {
8865 if (check_linkdemand (cfg, method, cmethod))
8866 INLINE_FAILURE ("linkdemand");
8867 CHECK_CFG_EXCEPTION;
8870 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8871 g_assert_not_reached ();
8874 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8877 if (!cfg->generic_sharing_context && cmethod)
8878 g_assert (!mono_method_check_context_used (cmethod));
8882 //g_assert (!virtual || fsig->hasthis);
8886 if (constrained_call) {
8887 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8888 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8889 /* The 'Own method' case below */
8890 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8891 /* 'The type parameter is instantiated as a reference type' case below. */
8893 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_call, &emit_widen, &bblock);
8894 CHECK_CFG_EXCEPTION;
8901 * We have the `constrained.' prefix opcode.
8903 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8905 * The type parameter is instantiated as a valuetype,
8906 * but that type doesn't override the method we're
8907 * calling, so we need to box `this'.
8909 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8910 ins->klass = constrained_call;
8911 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8912 CHECK_CFG_EXCEPTION;
8913 } else if (!constrained_call->valuetype) {
8914 int dreg = alloc_ireg_ref (cfg);
8917 * The type parameter is instantiated as a reference
8918 * type. We have a managed pointer on the stack, so
8919 * we need to dereference it here.
8921 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8922 ins->type = STACK_OBJ;
8925 if (cmethod->klass->valuetype) {
8928 /* Interface method */
8931 mono_class_setup_vtable (constrained_call);
8932 CHECK_TYPELOAD (constrained_call);
8933 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8935 TYPE_LOAD_ERROR (constrained_call);
8936 slot = mono_method_get_vtable_slot (cmethod);
8938 TYPE_LOAD_ERROR (cmethod->klass);
8939 cmethod = constrained_call->vtable [ioffset + slot];
8941 if (cmethod->klass == mono_defaults.enum_class) {
8942 /* Enum implements some interfaces, so treat this as the first case */
8943 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8944 ins->klass = constrained_call;
8945 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8946 CHECK_CFG_EXCEPTION;
8951 constrained_call = NULL;
8954 if (!calli && check_call_signature (cfg, fsig, sp))
8957 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8958 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8959 delegate_invoke = TRUE;
8962 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8964 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8965 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8973 * If the callee is a shared method, then its static cctor
8974 * might not get called after the call was patched.
8976 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8977 emit_generic_class_init (cfg, cmethod->klass);
8978 CHECK_TYPELOAD (cmethod->klass);
8982 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8984 if (cfg->generic_sharing_context && cmethod) {
8985 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8987 context_used = mini_method_check_context_used (cfg, cmethod);
8989 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8990 /* Generic method interface
8991 calls are resolved via a
8992 helper function and don't
8994 if (!cmethod_context || !cmethod_context->method_inst)
8995 pass_imt_from_rgctx = TRUE;
8999 * If a shared method calls another
9000 * shared method then the caller must
9001 * have a generic sharing context
9002 * because the magic trampoline
9003 * requires it. FIXME: We shouldn't
9004 * have to force the vtable/mrgctx
9005 * variable here. Instead there
9006 * should be a flag in the cfg to
9007 * request a generic sharing context.
9010 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9011 mono_get_vtable_var (cfg);
9016 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9018 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9020 CHECK_TYPELOAD (cmethod->klass);
9021 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9026 g_assert (!vtable_arg);
9028 if (!cfg->compile_aot) {
9030 * emit_get_rgctx_method () calls mono_class_vtable () so check
9031 * for type load errors before.
9033 mono_class_setup_vtable (cmethod->klass);
9034 CHECK_TYPELOAD (cmethod->klass);
9037 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9039 /* !marshalbyref is needed to properly handle generic methods + remoting */
9040 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9041 MONO_METHOD_IS_FINAL (cmethod)) &&
9042 !mono_class_is_marshalbyref (cmethod->klass)) {
9049 if (pass_imt_from_rgctx) {
9050 g_assert (!pass_vtable);
9053 imt_arg = emit_get_rgctx_method (cfg, context_used,
9054 cmethod, MONO_RGCTX_INFO_METHOD);
9058 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9060 /* Calling virtual generic methods */
9061 if (cmethod && virtual &&
9062 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9063 !(MONO_METHOD_IS_FINAL (cmethod) &&
9064 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9065 fsig->generic_param_count &&
9066 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9067 MonoInst *this_temp, *this_arg_temp, *store;
9068 MonoInst *iargs [4];
9069 gboolean use_imt = FALSE;
9071 g_assert (fsig->is_inflated);
9073 /* Prevent inlining of methods that contain indirect calls */
9074 INLINE_FAILURE ("virtual generic call");
9076 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9077 GSHAREDVT_FAILURE (*ip);
9079 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9080 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9085 g_assert (!imt_arg);
9087 g_assert (cmethod->is_inflated);
9088 imt_arg = emit_get_rgctx_method (cfg, context_used,
9089 cmethod, MONO_RGCTX_INFO_METHOD);
9090 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9092 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9093 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9094 MONO_ADD_INS (bblock, store);
9096 /* FIXME: This should be a managed pointer */
9097 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9099 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9100 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9101 cmethod, MONO_RGCTX_INFO_METHOD);
9102 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9103 addr = mono_emit_jit_icall (cfg,
9104 mono_helper_compile_generic_method, iargs);
9106 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9108 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9115 * Implement a workaround for the inherent races involved in locking:
9121 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9122 * try block, the Exit () won't be executed, see:
9123 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9124 * To work around this, we extend such try blocks to include the last x bytes
9125 * of the Monitor.Enter () call.
9127 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9128 MonoBasicBlock *tbb;
9130 GET_BBLOCK (cfg, tbb, ip + 5);
9132 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9133 * from Monitor.Enter like ArgumentNullException.
9135 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9136 /* Mark this bblock as needing to be extended */
9137 tbb->extend_try_block = TRUE;
9141 /* Conversion to a JIT intrinsic */
9142 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9144 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9145 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9152 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
9153 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9154 mono_method_check_inlining (cfg, cmethod)) {
9156 gboolean always = FALSE;
9158 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9159 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9160 /* Prevent inlining of methods that call wrappers */
9161 INLINE_FAILURE ("wrapper call");
9162 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9166 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9168 cfg->real_offset += 5;
9170 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9171 /* *sp is already set by inline_method */
9176 inline_costs += costs;
9182 /* Tail recursion elimination */
9183 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9184 gboolean has_vtargs = FALSE;
9187 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9188 INLINE_FAILURE ("tail call");
9190 /* keep it simple */
9191 for (i = fsig->param_count - 1; i >= 0; i--) {
9192 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9197 for (i = 0; i < n; ++i)
9198 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9199 MONO_INST_NEW (cfg, ins, OP_BR);
9200 MONO_ADD_INS (bblock, ins);
9201 tblock = start_bblock->out_bb [0];
9202 link_bblock (cfg, bblock, tblock);
9203 ins->inst_target_bb = tblock;
9204 start_new_bblock = 1;
9206 /* skip the CEE_RET, too */
9207 if (ip_in_bb (cfg, bblock, ip + 5))
9214 inline_costs += 10 * num_calls++;
9217 * Making generic calls out of gsharedvt methods.
9218 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9219 * patching gshared method addresses into a gsharedvt method.
9221 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9222 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9223 MonoRgctxInfoType info_type;
9226 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9227 //GSHAREDVT_FAILURE (*ip);
9228 // disable for possible remoting calls
9229 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9230 GSHAREDVT_FAILURE (*ip);
9231 if (fsig->generic_param_count) {
9232 /* virtual generic call */
9233 g_assert (mono_use_imt);
9234 g_assert (!imt_arg);
9235 /* Same as the virtual generic case above */
9236 imt_arg = emit_get_rgctx_method (cfg, context_used,
9237 cmethod, MONO_RGCTX_INFO_METHOD);
9238 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9240 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9241 /* This can happen when we call a fully instantiated iface method */
9242 imt_arg = emit_get_rgctx_method (cfg, context_used,
9243 cmethod, MONO_RGCTX_INFO_METHOD);
9248 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9249 keep_this_alive = sp [0];
9251 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9252 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9254 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9255 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9257 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9259 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9261 * We pass the address to the gsharedvt trampoline in the rgctx reg
9263 MonoInst *callee = addr;
9265 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9267 GSHAREDVT_FAILURE (*ip);
9269 addr = emit_get_rgctx_sig (cfg, context_used,
9270 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9271 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9275 /* Generic sharing */
9278 * Use this if the callee is gsharedvt sharable too, since
9279 * at runtime we might find an instantiation so the call cannot
9280 * be patched (the 'no_patch' code path in mini-trampolines.c).
9282 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9283 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9284 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9285 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9286 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9287 INLINE_FAILURE ("gshared");
9289 g_assert (cfg->generic_sharing_context && cmethod);
9293 * We are compiling a call to a
9294 * generic method from shared code,
9295 * which means that we have to look up
9296 * the method in the rgctx and do an
9300 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9302 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9303 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9307 /* Indirect calls */
9309 if (call_opcode == CEE_CALL)
9310 g_assert (context_used);
9311 else if (call_opcode == CEE_CALLI)
9312 g_assert (!vtable_arg);
9314 /* FIXME: what the hell is this??? */
9315 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9316 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9318 /* Prevent inlining of methods with indirect calls */
9319 INLINE_FAILURE ("indirect call");
9321 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9326 * Instead of emitting an indirect call, emit a direct call
9327 * with the contents of the aotconst as the patch info.
9329 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9330 info_type = addr->inst_c1;
9331 info_data = addr->inst_p0;
9333 info_type = addr->inst_right->inst_c1;
9334 info_data = addr->inst_right->inst_left;
9337 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9338 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9343 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9351 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9352 MonoInst *val = sp [fsig->param_count];
9354 if (val->type == STACK_OBJ) {
9355 MonoInst *iargs [2];
9360 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9363 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9364 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9365 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9366 emit_write_barrier (cfg, addr, val);
9367 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9368 GSHAREDVT_FAILURE (*ip);
9369 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9370 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9372 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9373 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9374 if (!cmethod->klass->element_class->valuetype && !readonly)
9375 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9376 CHECK_TYPELOAD (cmethod->klass);
9379 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9382 g_assert_not_reached ();
9389 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9393 /* Tail prefix / tail call optimization */
9395 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9396 /* FIXME: runtime generic context pointer for jumps? */
9397 /* FIXME: handle this for generic sharing eventually */
9398 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9399 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9400 supported_tail_call = TRUE;
9402 if (supported_tail_call) {
9405 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9406 INLINE_FAILURE ("tail call");
9408 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9410 if (ARCH_HAVE_OP_TAIL_CALL) {
9411 /* Handle tail calls similarly to normal calls */
9414 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9416 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9417 call->tail_call = TRUE;
9418 call->method = cmethod;
9419 call->signature = mono_method_signature (cmethod);
9422 * We implement tail calls by storing the actual arguments into the
9423 * argument variables, then emitting a CEE_JMP.
9425 for (i = 0; i < n; ++i) {
9426 /* Prevent argument from being register allocated */
9427 arg_array [i]->flags |= MONO_INST_VOLATILE;
9428 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9430 ins = (MonoInst*)call;
9431 ins->inst_p0 = cmethod;
9432 ins->inst_p1 = arg_array [0];
9433 MONO_ADD_INS (bblock, ins);
9434 link_bblock (cfg, bblock, end_bblock);
9435 start_new_bblock = 1;
9437 // FIXME: Eliminate unreachable epilogs
9440 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9441 * only reachable from this call.
9443 GET_BBLOCK (cfg, tblock, ip + 5);
9444 if (tblock == bblock || tblock->in_count == 0)
9453 * Synchronized wrappers.
9454 * It's hard to determine where to replace a method with its synchronized
9455 * wrapper without causing an infinite recursion. The current solution is
9456 * to add the synchronized wrapper in the trampolines, and to
9457 * change the called method to a dummy wrapper, and resolve that wrapper
9458 * to the real method in mono_jit_compile_method ().
9460 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9461 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9462 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9463 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9467 INLINE_FAILURE ("call");
9468 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9469 imt_arg, vtable_arg);
9472 link_bblock (cfg, bblock, end_bblock);
9473 start_new_bblock = 1;
9475 // FIXME: Eliminate unreachable epilogs
9478 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9479 * only reachable from this call.
9481 GET_BBLOCK (cfg, tblock, ip + 5);
9482 if (tblock == bblock || tblock->in_count == 0)
9489 /* End of call, INS should contain the result of the call, if any */
9491 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9494 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9499 if (keep_this_alive) {
9500 MonoInst *dummy_use;
9502 /* See mono_emit_method_call_full () */
9503 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9506 CHECK_CFG_EXCEPTION;
9510 g_assert (*ip == CEE_RET);
9514 constrained_call = NULL;
9516 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9520 if (cfg->method != method) {
9521 /* return from inlined method */
9523 * If in_count == 0, that means the ret is unreachable due to
9524 * being preceded by a throw. In that case, inline_method () will
9525 * handle setting the return value
9526 * (test case: test_0_inline_throw ()).
9528 if (return_var && cfg->cbb->in_count) {
9529 MonoType *ret_type = mono_method_signature (method)->ret;
9535 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9538 //g_assert (returnvar != -1);
9539 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9540 cfg->ret_var_set = TRUE;
9543 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9545 if (cfg->lmf_var && cfg->cbb->in_count)
9549 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
9551 if (seq_points && !sym_seq_points) {
9553 * Place a seq point here too even though the IL stack is not
9554 * empty, so a step over on
9557 * will work correctly.
9559 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9560 MONO_ADD_INS (cfg->cbb, ins);
9563 g_assert (!return_var);
9567 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9570 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9573 if (!cfg->vret_addr) {
9576 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9578 EMIT_NEW_RETLOADA (cfg, ret_addr);
9580 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9581 ins->klass = mono_class_from_mono_type (ret_type);
9584 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9585 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9586 MonoInst *iargs [1];
9590 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9591 mono_arch_emit_setret (cfg, method, conv);
9593 mono_arch_emit_setret (cfg, method, *sp);
9596 mono_arch_emit_setret (cfg, method, *sp);
9601 if (sp != stack_start)
9603 MONO_INST_NEW (cfg, ins, OP_BR);
9605 ins->inst_target_bb = end_bblock;
9606 MONO_ADD_INS (bblock, ins);
9607 link_bblock (cfg, bblock, end_bblock);
9608 start_new_bblock = 1;
9612 MONO_INST_NEW (cfg, ins, OP_BR);
9614 target = ip + 1 + (signed char)(*ip);
9616 GET_BBLOCK (cfg, tblock, target);
9617 link_bblock (cfg, bblock, tblock);
9618 ins->inst_target_bb = tblock;
9619 if (sp != stack_start) {
9620 handle_stack_args (cfg, stack_start, sp - stack_start);
9622 CHECK_UNVERIFIABLE (cfg);
9624 MONO_ADD_INS (bblock, ins);
9625 start_new_bblock = 1;
9626 inline_costs += BRANCH_COST;
9640 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9642 target = ip + 1 + *(signed char*)ip;
9648 inline_costs += BRANCH_COST;
9652 MONO_INST_NEW (cfg, ins, OP_BR);
9655 target = ip + 4 + (gint32)read32(ip);
9657 GET_BBLOCK (cfg, tblock, target);
9658 link_bblock (cfg, bblock, tblock);
9659 ins->inst_target_bb = tblock;
9660 if (sp != stack_start) {
9661 handle_stack_args (cfg, stack_start, sp - stack_start);
9663 CHECK_UNVERIFIABLE (cfg);
9666 MONO_ADD_INS (bblock, ins);
9668 start_new_bblock = 1;
9669 inline_costs += BRANCH_COST;
9676 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9677 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9678 guint32 opsize = is_short ? 1 : 4;
9680 CHECK_OPSIZE (opsize);
9682 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9685 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9690 GET_BBLOCK (cfg, tblock, target);
9691 link_bblock (cfg, bblock, tblock);
9692 GET_BBLOCK (cfg, tblock, ip);
9693 link_bblock (cfg, bblock, tblock);
9695 if (sp != stack_start) {
9696 handle_stack_args (cfg, stack_start, sp - stack_start);
9697 CHECK_UNVERIFIABLE (cfg);
9700 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9701 cmp->sreg1 = sp [0]->dreg;
9702 type_from_op (cfg, cmp, sp [0], NULL);
9705 #if SIZEOF_REGISTER == 4
9706 if (cmp->opcode == OP_LCOMPARE_IMM) {
9707 /* Convert it to OP_LCOMPARE */
9708 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9709 ins->type = STACK_I8;
9710 ins->dreg = alloc_dreg (cfg, STACK_I8);
9712 MONO_ADD_INS (bblock, ins);
9713 cmp->opcode = OP_LCOMPARE;
9714 cmp->sreg2 = ins->dreg;
9717 MONO_ADD_INS (bblock, cmp);
9719 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9720 type_from_op (cfg, ins, sp [0], NULL);
9721 MONO_ADD_INS (bblock, ins);
9722 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9723 GET_BBLOCK (cfg, tblock, target);
9724 ins->inst_true_bb = tblock;
9725 GET_BBLOCK (cfg, tblock, ip);
9726 ins->inst_false_bb = tblock;
9727 start_new_bblock = 2;
9730 inline_costs += BRANCH_COST;
9745 MONO_INST_NEW (cfg, ins, *ip);
9747 target = ip + 4 + (gint32)read32(ip);
9753 inline_costs += BRANCH_COST;
9757 MonoBasicBlock **targets;
9758 MonoBasicBlock *default_bblock;
9759 MonoJumpInfoBBTable *table;
9760 int offset_reg = alloc_preg (cfg);
9761 int target_reg = alloc_preg (cfg);
9762 int table_reg = alloc_preg (cfg);
9763 int sum_reg = alloc_preg (cfg);
9764 gboolean use_op_switch;
9768 n = read32 (ip + 1);
9771 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9775 CHECK_OPSIZE (n * sizeof (guint32));
9776 target = ip + n * sizeof (guint32);
9778 GET_BBLOCK (cfg, default_bblock, target);
9779 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9781 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9782 for (i = 0; i < n; ++i) {
9783 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9784 targets [i] = tblock;
9785 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9789 if (sp != stack_start) {
9791 * Link the current bb with the targets as well, so handle_stack_args
9792 * will set their in_stack correctly.
9794 link_bblock (cfg, bblock, default_bblock);
9795 for (i = 0; i < n; ++i)
9796 link_bblock (cfg, bblock, targets [i]);
9798 handle_stack_args (cfg, stack_start, sp - stack_start);
9800 CHECK_UNVERIFIABLE (cfg);
9803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9807 for (i = 0; i < n; ++i)
9808 link_bblock (cfg, bblock, targets [i]);
9810 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9811 table->table = targets;
9812 table->table_size = n;
9814 use_op_switch = FALSE;
9816 /* ARM implements SWITCH statements differently */
9817 /* FIXME: Make it use the generic implementation */
9818 if (!cfg->compile_aot)
9819 use_op_switch = TRUE;
9822 if (COMPILE_LLVM (cfg))
9823 use_op_switch = TRUE;
9825 cfg->cbb->has_jump_table = 1;
9827 if (use_op_switch) {
9828 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9829 ins->sreg1 = src1->dreg;
9830 ins->inst_p0 = table;
9831 ins->inst_many_bb = targets;
9832 ins->klass = GUINT_TO_POINTER (n);
9833 MONO_ADD_INS (cfg->cbb, ins);
9835 if (sizeof (gpointer) == 8)
9836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9840 #if SIZEOF_REGISTER == 8
9841 /* The upper word might not be zero, and we add it to a 64 bit address later */
9842 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9845 if (cfg->compile_aot) {
9846 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9848 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9849 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9850 ins->inst_p0 = table;
9851 ins->dreg = table_reg;
9852 MONO_ADD_INS (cfg->cbb, ins);
9855 /* FIXME: Use load_memindex */
9856 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9858 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9860 start_new_bblock = 1;
9861 inline_costs += (BRANCH_COST * 2);
9881 dreg = alloc_freg (cfg);
9884 dreg = alloc_lreg (cfg);
9887 dreg = alloc_ireg_ref (cfg);
9890 dreg = alloc_preg (cfg);
9893 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9894 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9895 if (*ip == CEE_LDIND_R4)
9896 ins->type = cfg->r4_stack_type;
9897 ins->flags |= ins_flag;
9898 MONO_ADD_INS (bblock, ins);
9900 if (ins_flag & MONO_INST_VOLATILE) {
9901 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9902 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9918 if (ins_flag & MONO_INST_VOLATILE) {
9919 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9920 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9923 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9924 ins->flags |= ins_flag;
9927 MONO_ADD_INS (bblock, ins);
9929 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9930 emit_write_barrier (cfg, sp [0], sp [1]);
/* Binary arithmetic handler (case labels elided): pop two operands,
 * infer the result type, and fold an OP_ICONST right operand into an
 * immediate-form opcode when the architecture accepts the value. */
9939 MONO_INST_NEW (cfg, ins, (*ip));
9941 ins->sreg1 = sp [0]->dreg;
9942 ins->sreg2 = sp [1]->dreg;
9943 type_from_op (cfg, ins, sp [0], sp [1]);
9945 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9947 /* Use the immediate opcodes if possible */
9948 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9949 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9950 if (imm_opcode != -1) {
9951 ins->opcode = imm_opcode;
9952 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
/* The constant was folded into the instruction, so its own def is dead. */
9955 NULLIFY_INS (sp [1]);
9959 MONO_ADD_INS ((cfg)->cbb, (ins));
/* Decomposition may split the op (e.g. long ops on 32-bit) and can
 * change the current bblock. */
9961 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
/* Second binary-op handler (mul/div/rem family, case labels elided):
 * like the previous one but also inserts widening conversions and
 * accepts OP_I8CONST immediates. */
9978 MONO_INST_NEW (cfg, ins, (*ip));
9980 ins->sreg1 = sp [0]->dreg;
9981 ins->sreg2 = sp [1]->dreg;
9982 type_from_op (cfg, ins, sp [0], sp [1]);
9984 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9985 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9987 /* FIXME: Pass opcode to is_inst_imm */
9989 /* Use the immediate opcodes if possible */
9990 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9993 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9994 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9995 /* Keep emulated opcodes which are optimized away later */
/* NOTE(review): comparing ins->opcode against OP_IDIV_UN_IMM here looks
 * odd — ins->opcode has not been converted to an _IMM form yet at this
 * point; verify whether OP_IDIV_UN was intended. */
9996 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9997 imm_opcode = mono_op_to_op_imm (ins->opcode);
10000 if (imm_opcode != -1) {
10001 ins->opcode = imm_opcode;
10002 if (sp [1]->opcode == OP_I8CONST) {
10003 #if SIZEOF_REGISTER == 8
10004 ins->inst_imm = sp [1]->inst_l;
/* 32-bit targets carry the long immediate as two word halves. */
10006 ins->inst_ls_word = sp [1]->inst_ls_word;
10007 ins->inst_ms_word = sp [1]->inst_ms_word;
10011 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10014 /* Might be followed by an instruction added by add_widen_op */
10015 if (sp [1]->next == NULL)
10016 NULLIFY_INS (sp [1]);
10019 MONO_ADD_INS ((cfg)->cbb, (ins));
10021 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
/* Conversion opcodes.  CONV_I8/CONV_U8 of an OP_ICONST is folded at
 * compile time by rewriting the constant in place into an OP_I8CONST,
 * so long constants exist directly in the IR. */
10034 case CEE_CONV_OVF_I8:
10035 case CEE_CONV_OVF_U8:
10036 case CEE_CONV_R_UN:
10039 /* Special case this earlier so we have long constants in the IR */
10040 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10041 int data = sp [-1]->inst_c0;
10042 sp [-1]->opcode = OP_I8CONST;
10043 sp [-1]->type = STACK_I8;
10044 #if SIZEOF_REGISTER == 8
/* CONV_U8 zero-extends the 32-bit value; CONV_I8 keeps the signed value. */
10045 if ((*ip) == CEE_CONV_U8)
10046 sp [-1]->inst_c0 = (guint32)data;
10048 sp [-1]->inst_c0 = data;
/* 32-bit: build the long from ls/ms word halves. */
10050 sp [-1]->inst_ls_word = data;
10051 if ((*ip) == CEE_CONV_U8)
10052 sp [-1]->inst_ms_word = 0;
/* Sign extension: replicate the sign bit into the high word. */
10054 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10056 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
/* Overflow-checked conversions.  Floating-point sources are first
 * converted to a 64-bit integer with overflow check; the subsequent
 * narrowing (elided from this view) then uses the long value. */
10063 case CEE_CONV_OVF_I4:
10064 case CEE_CONV_OVF_I1:
10065 case CEE_CONV_OVF_I2:
10066 case CEE_CONV_OVF_I:
10067 case CEE_CONV_OVF_U:
10070 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10071 ADD_UNOP (CEE_CONV_OVF_I8);
10078 case CEE_CONV_OVF_U1:
10079 case CEE_CONV_OVF_U2:
10080 case CEE_CONV_OVF_U4:
/* Unsigned targets go through an unsigned 64-bit intermediate. */
10083 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10084 ADD_UNOP (CEE_CONV_OVF_U8);
/* Unsigned-source overflow-checked conversions (bodies elided). */
10091 case CEE_CONV_OVF_I1_UN:
10092 case CEE_CONV_OVF_I2_UN:
10093 case CEE_CONV_OVF_I4_UN:
10094 case CEE_CONV_OVF_I8_UN:
10095 case CEE_CONV_OVF_U1_UN:
10096 case CEE_CONV_OVF_U2_UN:
10097 case CEE_CONV_OVF_U4_UN:
10098 case CEE_CONV_OVF_U8_UN:
10099 case CEE_CONV_OVF_I_UN:
10100 case CEE_CONV_OVF_U_UN:
10107 CHECK_CFG_EXCEPTION;
/* Overflow-checked arithmetic (handler body elided from this view). */
10111 case CEE_ADD_OVF_UN:
10113 case CEE_MUL_OVF_UN:
10115 case CEE_SUB_OVF_UN:
/* NOTE(review): line 10121 appears to belong to the preceding handler.
 * The code below looks like the CEE_CPOBJ handler: copy the value at the
 * address sp[1] to the address sp[0], typed by the token's class. */
10121 GSHAREDVT_FAILURE (*ip);
10124 token = read32 (ip + 1);
10125 klass = mini_get_class (method, token, generic_context);
10126 CHECK_TYPELOAD (klass);
/* Reference types degenerate to a pointer-sized load + store pair. */
10128 if (generic_class_is_reference_type (cfg, klass)) {
10129 MonoInst *store, *load;
10130 int dreg = alloc_ireg_ref (cfg);
10132 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10133 load->flags |= ins_flag;
10134 MONO_ADD_INS (cfg->cbb, load);
10136 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10137 store->flags |= ins_flag;
10138 MONO_ADD_INS (cfg->cbb, store);
/* Destination may be in the heap: emit a GC write barrier. */
10140 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10141 emit_write_barrier (cfg, sp [0], sp [1]);
/* Value types: block copy via stobj. */
10143 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
/* CEE_LDOBJ handler: load a (typically value-type) object from the
 * address on the stack.  Two peepholes: ldobj+stloc collapses into a
 * direct load into the local, and ldobj+stobj of the same token becomes
 * a single block copy (skipped for volatile accesses). */
10149 int loc_index = -1;
10155 token = read32 (ip + 1);
10156 klass = mini_get_class (method, token, generic_context);
10157 CHECK_TYPELOAD (klass);
10159 /* Optimize the common ldobj+stloc combination */
10162 loc_index = ip [6];
10169 loc_index = ip [5] - CEE_STLOC_0;
/* Only fuse when the following stloc is in the same basic block. */
10176 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10177 CHECK_LOCAL (loc_index);
10179 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10180 ins->dreg = cfg->locals [loc_index]->dreg;
10181 ins->flags |= ins_flag;
10184 if (ins_flag & MONO_INST_VOLATILE) {
10185 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10186 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10192 /* Optimize the ldobj+stobj combination */
10193 /* The reference case ends up being a load+store anyway */
10194 /* Skip this if the operation is volatile. */
10195 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10200 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
/* Generic path: plain typed load of the object. */
10207 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10208 ins->flags |= ins_flag;
10211 if (ins_flag & MONO_INST_VOLATILE) {
10212 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10213 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* CEE_LDSTR handler: push a string literal.  The strategy depends on how
 * the method is compiled: dynamic-method wrappers embed the object
 * directly, other wrappers build the string at run time, shared/AOT code
 * goes through icalls or patch constants, and plain JIT interns the
 * string eagerly. */
10222 CHECK_STACK_OVF (1);
10224 n = read32 (ip + 1);
10226 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
/* Dynamic methods store the already-created string in wrapper data. */
10227 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10228 ins->type = STACK_OBJ;
10231 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10232 MonoInst *iargs [1];
10233 char *str = mono_method_get_wrapper_data (method, n);
10235 if (cfg->compile_aot)
10236 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10238 EMIT_NEW_PCONST (cfg, iargs [0], str);
10239 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10241 if (cfg->opt & MONO_OPT_SHARED) {
10242 MonoInst *iargs [3];
10244 if (cfg->compile_aot) {
10245 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10247 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10248 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10249 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10250 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
/* Intern now as well so the runtime call above cannot fail later. */
10251 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
/* Out-of-line (cold) blocks avoid materializing the string object. */
10253 if (bblock->out_of_line) {
10254 MonoInst *iargs [2];
10256 if (image == mono_defaults.corlib) {
10258 * Avoid relocations in AOT and save some space by using a
10259 * version of helper_ldstr specialized to mscorlib.
10261 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10262 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10264 /* Avoid creating the string object */
10265 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10266 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10267 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10271 if (cfg->compile_aot) {
10272 NEW_LDSTRCONST (cfg, ins, image, n);
10274 MONO_ADD_INS (bblock, ins);
/* Plain JIT: intern the string now and embed the pointer. */
10277 NEW_PCONST (cfg, ins, NULL);
10278 ins->type = STACK_OBJ;
10279 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10281 OUT_OF_MEMORY_FAILURE;
10284 MONO_ADD_INS (bblock, ins);
/* CEE_NEWOBJ handler (heavily elided in this view): resolve the ctor,
 * run security/cctor checks, allocate the object (with fast paths for
 * corlib exceptions, SZ arrays, string ctors and value types), then call
 * the actual constructor. */
10293 MonoInst *iargs [2];
10294 MonoMethodSignature *fsig;
10297 MonoInst *vtable_arg = NULL;
10300 token = read32 (ip + 1);
10301 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10302 if (!cmethod || mono_loader_get_last_error ())
10304 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10307 mono_save_token_info (cfg, image, token, cmethod);
10309 if (!mono_class_init (cmethod->klass))
10310 TYPE_LOAD_ERROR (cmethod->klass);
10312 context_used = mini_method_check_context_used (cfg, cmethod);
/* CAS linkdemand / CoreCLR security checks. */
10314 if (mono_security_cas_enabled ()) {
10315 if (check_linkdemand (cfg, method, cmethod))
10316 INLINE_FAILURE ("linkdemand");
10317 CHECK_CFG_EXCEPTION;
10318 } else if (mono_security_core_clr_enabled ()) {
10319 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* Shared generic code cannot rely on the JIT triggering the cctor
 * implicitly, so emit an explicit generic class init. */
10322 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10323 emit_generic_class_init (cfg, cmethod->klass);
10324 CHECK_TYPELOAD (cmethod->klass);
10328 if (cfg->gsharedvt) {
10329 if (mini_is_gsharedvt_variable_signature (sig))
10330 GSHAREDVT_FAILURE (*ip);
10334 n = fsig->param_count;
10338 * Generate smaller code for the common newobj <exception> instruction in
10339 * argument checking code.
10341 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10342 is_exception_class (cmethod->klass) && n <= 2 &&
10343 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10344 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10345 MonoInst *iargs [3];
/* Dispatch to the 0/1/2-string-argument corlib exception helpers
 * (the switch on n is elided from this view). */
10349 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10352 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10355 iargs [1] = sp [0];
10356 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10359 iargs [1] = sp [0];
10360 iargs [2] = sp [1];
10361 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10364 g_assert_not_reached ();
10372 /* move the args to allow room for 'this' in the first position */
10378 /* check_call_signature () requires sp[0] to be set */
10379 this_ins.type = STACK_OBJ;
10380 sp [0] = &this_ins;
10381 if (check_call_signature (cfg, fsig, sp))
/* Allocation strategies, most specific first. */
10386 if (mini_class_is_system_array (cmethod->klass)) {
10387 *sp = emit_get_rgctx_method (cfg, context_used,
10388 cmethod, MONO_RGCTX_INFO_METHOD);
10390 /* Avoid varargs in the common case */
10391 if (fsig->param_count == 1)
10392 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10393 else if (fsig->param_count == 2)
10394 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10395 else if (fsig->param_count == 3)
10396 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10397 else if (fsig->param_count == 4)
10398 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10400 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10401 } else if (cmethod->string_ctor) {
10402 g_assert (!context_used);
10403 g_assert (!vtable_arg);
10404 /* we simply pass a null pointer */
10405 EMIT_NEW_PCONST (cfg, *sp, NULL);
10406 /* now call the string ctor */
10407 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
/* Value types are constructed in a zero-initialized local; 'this' is
 * the local's address. */
10409 if (cmethod->klass->valuetype) {
10410 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10411 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10412 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10417 * The code generated by mini_emit_virtual_call () expects
10418 * iargs [0] to be a boxed instance, but luckily the vcall
10419 * will be transformed into a normal call there.
10421 } else if (context_used) {
10422 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10425 MonoVTable *vtable = NULL;
10427 if (!cfg->compile_aot)
10428 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10429 CHECK_TYPELOAD (cmethod->klass);
10432 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10433 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10434 * As a workaround, we call class cctors before allocating objects.
10436 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10437 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10438 if (cfg->verbose_level > 2)
10439 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10440 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10443 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10446 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
/* Marker so later passes know alloc->dreg is non-null here. */
10449 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10451 /* Now call the actual ctor */
10452 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10453 CHECK_CFG_EXCEPTION;
/* Value-type case: push the constructed value back by loading the local. */
10456 if (alloc == NULL) {
10458 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10459 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
/* Emit a synthetic seq point after the ctor if none exists in metadata. */
10467 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10468 emit_seq_point (cfg, method, ip, FALSE, TRUE);
/* CEE_CASTCLASS: dynamic downcast; throws InvalidCastException at run
 * time on failure (handled inside handle_castclass ()). */
10471 case CEE_CASTCLASS:
10475 token = read32 (ip + 1);
10476 klass = mini_get_class (method, token, generic_context);
10477 CHECK_TYPELOAD (klass);
/* castclass is only verifiable on object references. */
10478 if (sp [0]->type != STACK_OBJ)
10481 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10482 CHECK_CFG_EXCEPTION;
/* CEE_ISINST handler: type test, pushing the object or null.  Fast paths:
 * a cached isinst wrapper for reference-variant generic arguments, and an
 * inlined marshal wrapper for MarshalByRef/interface types. */
10491 token = read32 (ip + 1);
10492 klass = mini_get_class (method, token, generic_context);
10493 CHECK_TYPELOAD (klass);
10494 if (sp [0]->type != STACK_OBJ)
10497 context_used = mini_class_check_context_used (cfg, klass);
10499 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10500 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10501 MonoInst *args [3];
10507 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* args[2] is the per-call-site cache slot (patched in AOT). */
10510 if (cfg->compile_aot)
10511 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10513 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10515 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10518 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10519 MonoMethod *mono_isinst;
10520 MonoInst *iargs [1];
10523 mono_isinst = mono_marshal_get_isinst (klass);
10524 iargs [0] = sp [0];
/* Inline the wrapper body; charges its size to inline_costs. */
10526 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10527 iargs, ip, cfg->real_offset, TRUE, &bblock);
10528 CHECK_CFG_EXCEPTION;
10529 g_assert (costs > 0);
10532 cfg->real_offset += 5;
10536 inline_costs += costs;
/* Generic path. */
10539 ins = handle_isinst (cfg, klass, *sp, context_used);
10540 CHECK_CFG_EXCEPTION;
/* CEE_UNBOX_ANY: for reference types this is just castclass; for
 * nullables it builds a Nullable<T>; for plain value types it unboxes to
 * an address and loads the value; gsharedvt gets its own helper. */
10547 case CEE_UNBOX_ANY: {
10548 MonoInst *res, *addr;
10553 token = read32 (ip + 1);
10554 klass = mini_get_class (method, token, generic_context);
10555 CHECK_TYPELOAD (klass);
10557 mono_save_token_info (cfg, image, token, klass);
10559 context_used = mini_class_check_context_used (cfg, klass);
10561 if (mini_is_gsharedvt_klass (cfg, klass)) {
10562 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10564 } else if (generic_class_is_reference_type (cfg, klass)) {
10565 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10566 CHECK_CFG_EXCEPTION;
10567 } else if (mono_class_is_nullable (klass)) {
10568 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
/* Plain value type: get the interior address, then load the value. */
10570 addr = handle_unbox (cfg, klass, sp, context_used);
10572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
/* CEE_BOX handler with two important intrinsic peepholes:
 *   1. box + constrained. + callvirt Enum::HasFlag → specialized bit test
 *      (avoids the very slow reflection-based HasFlag).
 *   2. box + brtrue/brfalse → unconditional branch (a boxed value type is
 *      never null), preserving bblock links for the stack merge logic. */
10583 MonoClass *enum_class;
10584 MonoMethod *has_flag;
10590 token = read32 (ip + 1);
10591 klass = mini_get_class (method, token, generic_context);
10592 CHECK_TYPELOAD (klass);
10594 mono_save_token_info (cfg, image, token, klass);
10596 context_used = mini_class_check_context_used (cfg, klass);
/* box of a reference type is a no-op (body elided). */
10598 if (generic_class_is_reference_type (cfg, klass)) {
10604 if (klass == mono_defaults.void_class)
10606 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10608 /* frequent check in generic code: box (struct), brtrue */
10613 * <push int/long ptr>
10616 * constrained. MyFlags
10617 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10619 * If we find this sequence and the operand types on box and constrained
10620 * are equal, we can emit a specialized instruction sequence instead of
10621 * the very slow HasFlag () call.
10623 if ((cfg->opt & MONO_OPT_INTRINS) &&
10624 /* Cheap checks first. */
10625 ip + 5 + 6 + 5 < end &&
10626 ip [5] == CEE_PREFIX1 &&
10627 ip [6] == CEE_CONSTRAINED_ &&
10628 ip [11] == CEE_CALLVIRT &&
10629 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10630 mono_class_is_enum (klass) &&
10631 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10632 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10633 has_flag->klass == mono_defaults.enum_class &&
10634 !strcmp (has_flag->name, "HasFlag") &&
10635 has_flag->signature->hasthis &&
10636 has_flag->signature->param_count == 1) {
10637 CHECK_TYPELOAD (enum_class);
10639 if (enum_class == klass) {
10640 MonoInst *enum_this, *enum_flag;
10645 enum_this = sp [0];
10646 enum_flag = sp [1];
10648 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10653 // FIXME: LLVM can't handle the inconsistent bb linking
10654 if (!mono_class_is_nullable (klass) &&
10655 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10656 (ip [5] == CEE_BRTRUE ||
10657 ip [5] == CEE_BRTRUE_S ||
10658 ip [5] == CEE_BRFALSE ||
10659 ip [5] == CEE_BRFALSE_S)) {
10660 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10662 MonoBasicBlock *true_bb, *false_bb;
10666 if (cfg->verbose_level > 3) {
10667 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10668 printf ("<box+brtrue opt>\n");
/* Decode the branch target (short vs. long forms, labels elided). */
10673 case CEE_BRFALSE_S:
10676 target = ip + 1 + (signed char)(*ip);
10683 target = ip + 4 + (gint)(read32 (ip));
10687 g_assert_not_reached ();
10691 * We need to link both bblocks, since it is needed for handling stack
10692 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10693 * Branching to only one of them would lead to inconsistencies, so
10694 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10696 GET_BBLOCK (cfg, true_bb, target);
10697 GET_BBLOCK (cfg, false_bb, ip);
10699 mono_link_bblock (cfg, cfg->cbb, true_bb);
10700 mono_link_bblock (cfg, cfg->cbb, false_bb);
10702 if (sp != stack_start) {
10703 handle_stack_args (cfg, stack_start, sp - stack_start);
10705 CHECK_UNVERIFIABLE (cfg);
10708 if (COMPILE_LLVM (cfg)) {
10709 dreg = alloc_ireg (cfg);
10710 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10713 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10715 /* The JIT can't eliminate the iconst+compare */
10716 MONO_INST_NEW (cfg, ins, OP_BR);
10717 ins->inst_target_bb = is_true ? true_bb : false_bb;
10718 MONO_ADD_INS (cfg->cbb, ins);
10721 start_new_bblock = 1;
/* Generic path: actually allocate and box. */
10725 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10727 CHECK_CFG_EXCEPTION;
/* CEE_UNBOX handler: push the address of the value inside a boxed object.
 * Nullable<T> is special: it is materialized into a variable whose
 * address is pushed instead. */
10736 token = read32 (ip + 1);
10737 klass = mini_get_class (method, token, generic_context);
10738 CHECK_TYPELOAD (klass);
10740 mono_save_token_info (cfg, image, token, klass);
10742 context_used = mini_class_check_context_used (cfg, klass);
10744 if (mono_class_is_nullable (klass)) {
10747 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10748 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10752 ins = handle_unbox (cfg, klass, sp, context_used);
/* Shared handler for the field opcodes (LDFLD/LDFLDA/STFLD and their
 * static counterparts): decode the field token, pop operands, run
 * access/security checks, and normalize LDFLD-of-a-static into the
 * static path. */
10765 MonoClassField *field;
10766 #ifndef DISABLE_REMOTING
10770 gboolean is_instance;
10772 gpointer addr = NULL;
10773 gboolean is_special_static;
10775 MonoInst *store_val = NULL;
10776 MonoInst *thread_ins;
10779 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10781 if (op == CEE_STFLD) {
10784 store_val = sp [1];
/* Verifier-style operand checks on the object reference. */
10789 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10791 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10794 if (op == CEE_STSFLD) {
10797 store_val = sp [0];
10802 token = read32 (ip + 1);
10803 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10804 field = mono_method_get_wrapper_data (method, token);
10805 klass = field->parent;
10808 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10811 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10812 FIELD_ACCESS_FAILURE (method, field);
10813 mono_class_init (klass);
10815 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10818 /* if the class is Critical then transparent code cannot access it's fields */
10819 if (!is_instance && mono_security_core_clr_enabled ())
10820 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10822 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10823 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10824 if (mono_security_core_clr_enabled ())
10825 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10829 * LDFLD etc. is usable on static fields as well, so convert those cases to
10832 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10844 g_assert_not_reached ();
10846 is_instance = FALSE;
10849 context_used = mini_class_check_context_used (cfg, klass);
10851 /* INSTANCE CASE */
/* foffset: offset from the object pointer; for value types there is no
 * MonoObject header before the fields. */
10853 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10854 if (op == CEE_STFLD) {
10855 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10857 #ifndef DISABLE_REMOTING
/* Remoting: stores to transparent-proxy / context-bound objects must go
 * through the stfld wrapper (inlined when allowed). */
10858 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10859 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10860 MonoInst *iargs [5];
10862 GSHAREDVT_FAILURE (op);
10864 iargs [0] = sp [0];
10865 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10866 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10867 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10869 iargs [4] = sp [1];
10871 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10872 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10873 iargs, ip, cfg->real_offset, TRUE, &bblock);
10874 CHECK_CFG_EXCEPTION;
10875 g_assert (costs > 0);
10877 cfg->real_offset += 5;
10879 inline_costs += costs;
10881 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
/* Plain instance store. */
10888 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* gsharedvt: field offset is only known at run time — fetch it from
 * the RGCTX and compute the address explicitly. */
10890 if (mini_is_gsharedvt_klass (cfg, klass)) {
10891 MonoInst *offset_ins;
10893 context_used = mini_class_check_context_used (cfg, klass);
10895 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10896 dreg = alloc_ireg_mp (cfg);
10897 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10898 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10899 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10901 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
/* LDADDR-based objects cannot fault on access. */
10903 if (sp [0]->opcode != OP_LDADDR)
10904 store->flags |= MONO_INST_FAULT;
10906 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10907 /* insert call to write barrier */
10911 dreg = alloc_ireg_mp (cfg);
10912 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10913 emit_write_barrier (cfg, ptr, sp [1]);
10916 store->flags |= ins_flag;
/* Instance LDFLD / LDFLDA paths. */
10923 #ifndef DISABLE_REMOTING
/* Remoting: loads from proxy objects go through the ldfld/ldflda
 * wrapper, mirroring the store path above. */
10924 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10925 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10926 MonoInst *iargs [4];
10928 GSHAREDVT_FAILURE (op);
10930 iargs [0] = sp [0];
10931 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10932 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10933 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10934 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10935 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10936 iargs, ip, cfg->real_offset, TRUE, &bblock);
10937 CHECK_CFG_EXCEPTION;
10938 g_assert (costs > 0);
10940 cfg->real_offset += 5;
10944 inline_costs += costs;
10946 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* LDFLD on a value type on the stack: force it into an addressable
 * variable so its field address can be computed. */
10952 if (sp [0]->type == STACK_VTYPE) {
10955 /* Have to compute the address of the variable */
10957 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10959 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10961 g_assert (var->klass == klass);
10963 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10967 if (op == CEE_LDFLDA) {
10968 if (is_magic_tls_access (field)) {
10969 GSHAREDVT_FAILURE (*ip);
10971 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
/* Explicit null check before taking a field address. */
10973 if (sp [0]->type == STACK_OBJ) {
10974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10975 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10978 dreg = alloc_ireg_mp (cfg);
10980 if (mini_is_gsharedvt_klass (cfg, klass)) {
10981 MonoInst *offset_ins;
10983 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10984 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10986 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10988 ins->klass = mono_class_from_mono_type (field->type);
10989 ins->type = STACK_MP;
/* Plain LDFLD: typed load from object + offset. */
10995 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10997 if (mini_is_gsharedvt_klass (cfg, klass)) {
10998 MonoInst *offset_ins;
11000 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11001 dreg = alloc_ireg_mp (cfg);
11002 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11003 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11005 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11007 load->flags |= ins_flag;
11008 if (sp [0]->opcode != OP_LDADDR)
11009 load->flags |= MONO_INST_FAULT;
/* STATIC CASE: compute the address of the static field.  Special static
 * fields (thread/context static) live in per-thread storage and get an
 * inlined TLS fast path when a thread intrinsic is available. */
11023 * We can only support shared generic static
11024 * field access on architectures where the
11025 * trampoline code has been extended to handle
11026 * the generic class init.
11028 #ifndef MONO_ARCH_VTABLE_REG
11029 GENERIC_SHARING_FAILURE (op);
11032 context_used = mini_class_check_context_used (cfg, klass);
11034 ftype = mono_field_get_type (field);
/* Literal (const) fields have no storage — handled elsewhere (elided). */
11036 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11039 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11040 * to be called here.
11042 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11043 mono_class_vtable (cfg->domain, klass);
11044 CHECK_TYPELOAD (klass);
11046 mono_domain_lock (cfg->domain);
11047 if (cfg->domain->special_static_fields)
11048 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11049 mono_domain_unlock (cfg->domain);
11051 is_special_static = mono_class_field_is_special_static (field);
/* NOTE(review): the 0x80000000 bit in addr appears to flag an entry that
 * cannot take the TLS fast path — confirm against
 * mono_get_special_static_data (). */
11053 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11054 thread_ins = mono_get_thread_intrinsic (cfg);
11058 /* Generate IR to compute the field address */
11059 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11061 * Fast access to TLS data
11062 * Inline version of get_thread_static_data () in
11066 int idx, static_data_reg, array_reg, dreg;
11068 GSHAREDVT_FAILURE (op);
11070 // offset &= 0x7fffffff;
11071 // idx = (offset >> 24) - 1;
11072 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11073 MONO_ADD_INS (cfg->cbb, thread_ins);
11074 static_data_reg = alloc_ireg (cfg);
11075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* AOT cannot bake the offset in, so decode it at run time from the
 * patched SFLDA constant using the scheme commented above. */
11077 if (cfg->compile_aot) {
11078 int offset_reg, offset2_reg, idx_reg;
11080 /* For TLS variables, this will return the TLS offset */
11081 EMIT_NEW_SFLDACONST (cfg, ins, field);
11082 offset_reg = ins->dreg;
11083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11084 idx_reg = alloc_ireg (cfg);
11085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11087 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11088 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11089 array_reg = alloc_ireg (cfg);
11090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11091 offset2_reg = alloc_ireg (cfg);
11092 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11093 dreg = alloc_ireg (cfg);
11094 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT: offset is known now, fold the index/offset at compile time. */
11096 offset = (gsize)addr & 0x7fffffff;
11097 idx = (offset >> 24) - 1;
11099 array_reg = alloc_ireg (cfg);
11100 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11101 dreg = alloc_ireg (cfg);
11102 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
/* Remaining static-field address strategies, in order: icall for
 * shared/AOT special statics, RGCTX-based address for shared generic
 * code, icall for shared/AOT normal statics, direct vtable pointer for
 * plain JIT, and the special-static icall fallback. */
11104 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11105 (cfg->compile_aot && is_special_static) ||
11106 (context_used && is_special_static)) {
11107 MonoInst *iargs [2];
11109 g_assert (field->parent);
11110 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11111 if (context_used) {
11112 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11113 field, MONO_RGCTX_INFO_CLASS_FIELD);
11115 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11117 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11118 } else if (context_used) {
11119 MonoInst *static_data;
11122 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11123 method->klass->name_space, method->klass->name, method->name,
11124 depth, field->offset);
/* Shared code must trigger the cctor explicitly. */
11127 if (mono_class_needs_cctor_run (klass, method))
11128 emit_generic_class_init (cfg, klass);
11131 * The pointer we're computing here is
11133 * super_info.static_data + field->offset
11135 static_data = emit_get_rgctx_klass (cfg, context_used,
11136 klass, MONO_RGCTX_INFO_STATIC_DATA);
/* gsharedvt: even the offset comes from the RGCTX. */
11138 if (mini_is_gsharedvt_klass (cfg, klass)) {
11139 MonoInst *offset_ins;
11141 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11142 dreg = alloc_ireg_mp (cfg);
11143 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11144 } else if (field->offset == 0) {
11147 int addr_reg = mono_alloc_preg (cfg);
11148 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11150 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11151 MonoInst *iargs [2];
11153 g_assert (field->parent);
11154 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11155 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11156 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11158 MonoVTable *vtable = NULL;
11160 if (!cfg->compile_aot)
11161 vtable = mono_class_vtable (cfg->domain, klass);
11162 CHECK_TYPELOAD (klass);
11165 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11166 if (!(g_slist_find (class_inits, klass))) {
11167 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11168 if (cfg->verbose_level > 2)
11169 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11170 class_inits = g_slist_prepend (class_inits, klass);
/* Eagerly run the cctor at compile time when allowed; otherwise force
 * an out-of-line call (inlining would change cctor ordering). */
11173 if (cfg->run_cctors) {
11175 /* This makes so that inline cannot trigger */
11176 /* .cctors: too many apps depend on them */
11177 /* running with a specific order... */
11179 if (! vtable->initialized)
11180 INLINE_FAILURE ("class init");
11181 ex = mono_runtime_class_init_full (vtable, FALSE);
11183 set_exception_object (cfg, ex);
11184 goto exception_exit;
11188 if (cfg->compile_aot)
11189 EMIT_NEW_SFLDACONST (cfg, ins, field);
11192 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11194 EMIT_NEW_PCONST (cfg, ins, addr);
/* Special-static fallback: resolve the per-thread slot via icall. */
11197 MonoInst *iargs [1];
11198 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11199 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11203 /* Generate IR to do the actual load/store operation */
11205 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11206 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11207 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11210 if (op == CEE_LDSFLDA) {
11211 ins->klass = mono_class_from_mono_type (ftype);
11212 ins->type = STACK_PTR;
11214 } else if (op == CEE_STSFLD) {
11217 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11218 store->flags |= ins_flag;
11220 gboolean is_const = FALSE;
11221 MonoVTable *vtable = NULL;
11222 gpointer addr = NULL;
11224 if (!context_used) {
11225 vtable = mono_class_vtable (cfg->domain, klass);
11226 CHECK_TYPELOAD (klass);
11228 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11229 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11230 int ro_type = ftype->type;
11232 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11233 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11234 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11237 GSHAREDVT_FAILURE (op);
11239 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11242 case MONO_TYPE_BOOLEAN:
11244 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11248 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11251 case MONO_TYPE_CHAR:
11253 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11257 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11262 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11266 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11271 case MONO_TYPE_PTR:
11272 case MONO_TYPE_FNPTR:
11273 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11274 type_to_eval_stack_type ((cfg), field->type, *sp);
11277 case MONO_TYPE_STRING:
11278 case MONO_TYPE_OBJECT:
11279 case MONO_TYPE_CLASS:
11280 case MONO_TYPE_SZARRAY:
11281 case MONO_TYPE_ARRAY:
11282 if (!mono_gc_is_moving ()) {
11283 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11284 type_to_eval_stack_type ((cfg), field->type, *sp);
11292 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11297 case MONO_TYPE_VALUETYPE:
11307 CHECK_STACK_OVF (1);
11309 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11310 load->flags |= ins_flag;
11316 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11317 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11318 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11329 token = read32 (ip + 1);
11330 klass = mini_get_class (method, token, generic_context);
11331 CHECK_TYPELOAD (klass);
11332 if (ins_flag & MONO_INST_VOLATILE) {
11333 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11334 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11336 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11338 ins->flags |= ins_flag;
11339 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11340 generic_class_is_reference_type (cfg, klass)) {
11341 /* insert call to write barrier */
11342 emit_write_barrier (cfg, sp [0], sp [1]);
11354 const char *data_ptr;
11356 guint32 field_token;
11362 token = read32 (ip + 1);
11364 klass = mini_get_class (method, token, generic_context);
11365 CHECK_TYPELOAD (klass);
11367 context_used = mini_class_check_context_used (cfg, klass);
11369 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11370 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11371 ins->sreg1 = sp [0]->dreg;
11372 ins->type = STACK_I4;
11373 ins->dreg = alloc_ireg (cfg);
11374 MONO_ADD_INS (cfg->cbb, ins);
11375 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11378 if (context_used) {
11379 MonoInst *args [3];
11380 MonoClass *array_class = mono_array_class_get (klass, 1);
11381 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11383 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11386 args [0] = emit_get_rgctx_klass (cfg, context_used,
11387 array_class, MONO_RGCTX_INFO_VTABLE);
11392 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11394 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11396 if (cfg->opt & MONO_OPT_SHARED) {
11397 /* Decompose now to avoid problems with references to the domainvar */
11398 MonoInst *iargs [3];
11400 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11401 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11402 iargs [2] = sp [0];
11404 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11406 /* Decompose later since it is needed by abcrem */
11407 MonoClass *array_type = mono_array_class_get (klass, 1);
11408 mono_class_vtable (cfg->domain, array_type);
11409 CHECK_TYPELOAD (array_type);
11411 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11412 ins->dreg = alloc_ireg_ref (cfg);
11413 ins->sreg1 = sp [0]->dreg;
11414 ins->inst_newa_class = klass;
11415 ins->type = STACK_OBJ;
11416 ins->klass = array_type;
11417 MONO_ADD_INS (cfg->cbb, ins);
11418 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11419 cfg->cbb->has_array_access = TRUE;
11421 /* Needed so mono_emit_load_get_addr () gets called */
11422 mono_get_got_var (cfg);
11432 * we inline/optimize the initialization sequence if possible.
11433 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11434 * for small sizes open code the memcpy
11435 * ensure the rva field is big enough
11437 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11438 MonoMethod *memcpy_method = get_memcpy_method ();
11439 MonoInst *iargs [3];
11440 int add_reg = alloc_ireg_mp (cfg);
11442 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11443 if (cfg->compile_aot) {
11444 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11446 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11448 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11449 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11458 if (sp [0]->type != STACK_OBJ)
11461 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11462 ins->dreg = alloc_preg (cfg);
11463 ins->sreg1 = sp [0]->dreg;
11464 ins->type = STACK_I4;
11465 /* This flag will be inherited by the decomposition */
11466 ins->flags |= MONO_INST_FAULT;
11467 MONO_ADD_INS (cfg->cbb, ins);
11468 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11469 cfg->cbb->has_array_access = TRUE;
11477 if (sp [0]->type != STACK_OBJ)
11480 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11482 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11483 CHECK_TYPELOAD (klass);
11484 /* we need to make sure that this array is exactly the type it needs
11485 * to be for correctness. the wrappers are lax with their usage
11486 * so we need to ignore them here
11488 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11489 MonoClass *array_class = mono_array_class_get (klass, 1);
11490 mini_emit_check_array_type (cfg, sp [0], array_class);
11491 CHECK_TYPELOAD (array_class);
11495 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11500 case CEE_LDELEM_I1:
11501 case CEE_LDELEM_U1:
11502 case CEE_LDELEM_I2:
11503 case CEE_LDELEM_U2:
11504 case CEE_LDELEM_I4:
11505 case CEE_LDELEM_U4:
11506 case CEE_LDELEM_I8:
11508 case CEE_LDELEM_R4:
11509 case CEE_LDELEM_R8:
11510 case CEE_LDELEM_REF: {
11516 if (*ip == CEE_LDELEM) {
11518 token = read32 (ip + 1);
11519 klass = mini_get_class (method, token, generic_context);
11520 CHECK_TYPELOAD (klass);
11521 mono_class_init (klass);
11524 klass = array_access_to_klass (*ip);
11526 if (sp [0]->type != STACK_OBJ)
11529 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11531 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11532 // FIXME-VT: OP_ICONST optimization
11533 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11534 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11535 ins->opcode = OP_LOADV_MEMBASE;
11536 } else if (sp [1]->opcode == OP_ICONST) {
11537 int array_reg = sp [0]->dreg;
11538 int index_reg = sp [1]->dreg;
11539 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11541 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11542 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11544 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11545 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11548 if (*ip == CEE_LDELEM)
11555 case CEE_STELEM_I1:
11556 case CEE_STELEM_I2:
11557 case CEE_STELEM_I4:
11558 case CEE_STELEM_I8:
11559 case CEE_STELEM_R4:
11560 case CEE_STELEM_R8:
11561 case CEE_STELEM_REF:
11566 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11568 if (*ip == CEE_STELEM) {
11570 token = read32 (ip + 1);
11571 klass = mini_get_class (method, token, generic_context);
11572 CHECK_TYPELOAD (klass);
11573 mono_class_init (klass);
11576 klass = array_access_to_klass (*ip);
11578 if (sp [0]->type != STACK_OBJ)
11581 emit_array_store (cfg, klass, sp, TRUE);
11583 if (*ip == CEE_STELEM)
11590 case CEE_CKFINITE: {
11594 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11595 ins->sreg1 = sp [0]->dreg;
11596 ins->dreg = alloc_freg (cfg);
11597 ins->type = STACK_R8;
11598 MONO_ADD_INS (bblock, ins);
11600 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11605 case CEE_REFANYVAL: {
11606 MonoInst *src_var, *src;
11608 int klass_reg = alloc_preg (cfg);
11609 int dreg = alloc_preg (cfg);
11611 GSHAREDVT_FAILURE (*ip);
11614 MONO_INST_NEW (cfg, ins, *ip);
11617 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11618 CHECK_TYPELOAD (klass);
11620 context_used = mini_class_check_context_used (cfg, klass);
11623 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11625 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11626 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11629 if (context_used) {
11630 MonoInst *klass_ins;
11632 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11633 klass, MONO_RGCTX_INFO_KLASS);
11636 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11637 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11639 mini_emit_class_check (cfg, klass_reg, klass);
11641 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11642 ins->type = STACK_MP;
11647 case CEE_MKREFANY: {
11648 MonoInst *loc, *addr;
11650 GSHAREDVT_FAILURE (*ip);
11653 MONO_INST_NEW (cfg, ins, *ip);
11656 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11657 CHECK_TYPELOAD (klass);
11659 context_used = mini_class_check_context_used (cfg, klass);
11661 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11662 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11664 if (context_used) {
11665 MonoInst *const_ins;
11666 int type_reg = alloc_preg (cfg);
11668 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11672 } else if (cfg->compile_aot) {
11673 int const_reg = alloc_preg (cfg);
11674 int type_reg = alloc_preg (cfg);
11676 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11679 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11681 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11682 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11686 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11687 ins->type = STACK_VTYPE;
11688 ins->klass = mono_defaults.typed_reference_class;
11693 case CEE_LDTOKEN: {
11695 MonoClass *handle_class;
11697 CHECK_STACK_OVF (1);
11700 n = read32 (ip + 1);
11702 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11703 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11704 handle = mono_method_get_wrapper_data (method, n);
11705 handle_class = mono_method_get_wrapper_data (method, n + 1);
11706 if (handle_class == mono_defaults.typehandle_class)
11707 handle = &((MonoClass*)handle)->byval_arg;
11710 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11715 mono_class_init (handle_class);
11716 if (cfg->generic_sharing_context) {
11717 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11718 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11719 /* This case handles ldtoken
11720 of an open type, like for
11723 } else if (handle_class == mono_defaults.typehandle_class) {
11724 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11725 } else if (handle_class == mono_defaults.fieldhandle_class)
11726 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11727 else if (handle_class == mono_defaults.methodhandle_class)
11728 context_used = mini_method_check_context_used (cfg, handle);
11730 g_assert_not_reached ();
11733 if ((cfg->opt & MONO_OPT_SHARED) &&
11734 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11735 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11736 MonoInst *addr, *vtvar, *iargs [3];
11737 int method_context_used;
11739 method_context_used = mini_method_check_context_used (cfg, method);
11741 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11743 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11744 EMIT_NEW_ICONST (cfg, iargs [1], n);
11745 if (method_context_used) {
11746 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11747 method, MONO_RGCTX_INFO_METHOD);
11748 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11750 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11751 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11753 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11757 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11759 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11760 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11761 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11762 (cmethod->klass == mono_defaults.systemtype_class) &&
11763 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11764 MonoClass *tclass = mono_class_from_mono_type (handle);
11766 mono_class_init (tclass);
11767 if (context_used) {
11768 ins = emit_get_rgctx_klass (cfg, context_used,
11769 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11770 } else if (cfg->compile_aot) {
11771 if (method->wrapper_type) {
11772 mono_error_init (&error); //got to do it since there are multiple conditionals below
11773 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11774 /* Special case for static synchronized wrappers */
11775 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11777 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11778 /* FIXME: n is not a normal token */
11780 EMIT_NEW_PCONST (cfg, ins, NULL);
11783 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11786 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11788 ins->type = STACK_OBJ;
11789 ins->klass = cmethod->klass;
11792 MonoInst *addr, *vtvar;
11794 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11796 if (context_used) {
11797 if (handle_class == mono_defaults.typehandle_class) {
11798 ins = emit_get_rgctx_klass (cfg, context_used,
11799 mono_class_from_mono_type (handle),
11800 MONO_RGCTX_INFO_TYPE);
11801 } else if (handle_class == mono_defaults.methodhandle_class) {
11802 ins = emit_get_rgctx_method (cfg, context_used,
11803 handle, MONO_RGCTX_INFO_METHOD);
11804 } else if (handle_class == mono_defaults.fieldhandle_class) {
11805 ins = emit_get_rgctx_field (cfg, context_used,
11806 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11808 g_assert_not_reached ();
11810 } else if (cfg->compile_aot) {
11811 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11813 EMIT_NEW_PCONST (cfg, ins, handle);
11815 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11817 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11827 MONO_INST_NEW (cfg, ins, OP_THROW);
11829 ins->sreg1 = sp [0]->dreg;
11831 bblock->out_of_line = TRUE;
11832 MONO_ADD_INS (bblock, ins);
11833 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11834 MONO_ADD_INS (bblock, ins);
11837 link_bblock (cfg, bblock, end_bblock);
11838 start_new_bblock = 1;
11840 case CEE_ENDFINALLY:
11841 /* mono_save_seq_point_info () depends on this */
11842 if (sp != stack_start)
11843 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11844 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11845 MONO_ADD_INS (bblock, ins);
11847 start_new_bblock = 1;
11850 * Control will leave the method so empty the stack, otherwise
11851 * the next basic block will start with a nonempty stack.
11853 while (sp != stack_start) {
11858 case CEE_LEAVE_S: {
11861 if (*ip == CEE_LEAVE) {
11863 target = ip + 5 + (gint32)read32(ip + 1);
11866 target = ip + 2 + (signed char)(ip [1]);
11869 /* empty the stack */
11870 while (sp != stack_start) {
11875 * If this leave statement is in a catch block, check for a
11876 * pending exception, and rethrow it if necessary.
11877 * We avoid doing this in runtime invoke wrappers, since those are called
11878 * by native code which excepts the wrapper to catch all exceptions.
11880 for (i = 0; i < header->num_clauses; ++i) {
11881 MonoExceptionClause *clause = &header->clauses [i];
11884 * Use <= in the final comparison to handle clauses with multiple
11885 * leave statements, like in bug #78024.
11886 * The ordering of the exception clauses guarantees that we find the
11887 * innermost clause.
11889 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11891 MonoBasicBlock *dont_throw;
11896 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11899 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11901 NEW_BBLOCK (cfg, dont_throw);
11904 * Currently, we always rethrow the abort exception, despite the
11905 * fact that this is not correct. See thread6.cs for an example.
11906 * But propagating the abort exception is more important than
11907 * getting the sematics right.
11909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11911 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11913 MONO_START_BB (cfg, dont_throw);
11918 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11920 MonoExceptionClause *clause;
11922 for (tmp = handlers; tmp; tmp = tmp->next) {
11923 clause = tmp->data;
11924 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11926 link_bblock (cfg, bblock, tblock);
11927 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11928 ins->inst_target_bb = tblock;
11929 ins->inst_eh_block = clause;
11930 MONO_ADD_INS (bblock, ins);
11931 bblock->has_call_handler = 1;
11932 if (COMPILE_LLVM (cfg)) {
11933 MonoBasicBlock *target_bb;
11936 * Link the finally bblock with the target, since it will
11937 * conceptually branch there.
11938 * FIXME: Have to link the bblock containing the endfinally.
11940 GET_BBLOCK (cfg, target_bb, target);
11941 link_bblock (cfg, tblock, target_bb);
11944 g_list_free (handlers);
11947 MONO_INST_NEW (cfg, ins, OP_BR);
11948 MONO_ADD_INS (bblock, ins);
11949 GET_BBLOCK (cfg, tblock, target);
11950 link_bblock (cfg, bblock, tblock);
11951 ins->inst_target_bb = tblock;
11952 start_new_bblock = 1;
11954 if (*ip == CEE_LEAVE)
11963 * Mono specific opcodes
11965 case MONO_CUSTOM_PREFIX: {
11967 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11971 case CEE_MONO_ICALL: {
11973 MonoJitICallInfo *info;
11975 token = read32 (ip + 2);
11976 func = mono_method_get_wrapper_data (method, token);
11977 info = mono_find_jit_icall_by_addr (func);
11979 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11982 CHECK_STACK (info->sig->param_count);
11983 sp -= info->sig->param_count;
11985 ins = mono_emit_jit_icall (cfg, info->func, sp);
11986 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11990 inline_costs += 10 * num_calls++;
11994 case CEE_MONO_LDPTR: {
11997 CHECK_STACK_OVF (1);
11999 token = read32 (ip + 2);
12001 ptr = mono_method_get_wrapper_data (method, token);
12002 /* FIXME: Generalize this */
12003 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
12004 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12009 EMIT_NEW_PCONST (cfg, ins, ptr);
12012 inline_costs += 10 * num_calls++;
12013 /* Can't embed random pointers into AOT code */
12017 case CEE_MONO_JIT_ICALL_ADDR: {
12018 MonoJitICallInfo *callinfo;
12021 CHECK_STACK_OVF (1);
12023 token = read32 (ip + 2);
12025 ptr = mono_method_get_wrapper_data (method, token);
12026 callinfo = mono_find_jit_icall_by_addr (ptr);
12027 g_assert (callinfo);
12028 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12031 inline_costs += 10 * num_calls++;
12034 case CEE_MONO_ICALL_ADDR: {
12035 MonoMethod *cmethod;
12038 CHECK_STACK_OVF (1);
12040 token = read32 (ip + 2);
12042 cmethod = mono_method_get_wrapper_data (method, token);
12044 if (cfg->compile_aot) {
12045 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12047 ptr = mono_lookup_internal_call (cmethod);
12049 EMIT_NEW_PCONST (cfg, ins, ptr);
12055 case CEE_MONO_VTADDR: {
12056 MonoInst *src_var, *src;
12062 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12063 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12068 case CEE_MONO_NEWOBJ: {
12069 MonoInst *iargs [2];
12071 CHECK_STACK_OVF (1);
12073 token = read32 (ip + 2);
12074 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12075 mono_class_init (klass);
12076 NEW_DOMAINCONST (cfg, iargs [0]);
12077 MONO_ADD_INS (cfg->cbb, iargs [0]);
12078 NEW_CLASSCONST (cfg, iargs [1], klass);
12079 MONO_ADD_INS (cfg->cbb, iargs [1]);
12080 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12082 inline_costs += 10 * num_calls++;
12085 case CEE_MONO_OBJADDR:
12088 MONO_INST_NEW (cfg, ins, OP_MOVE);
12089 ins->dreg = alloc_ireg_mp (cfg);
12090 ins->sreg1 = sp [0]->dreg;
12091 ins->type = STACK_MP;
12092 MONO_ADD_INS (cfg->cbb, ins);
12096 case CEE_MONO_LDNATIVEOBJ:
12098 * Similar to LDOBJ, but instead load the unmanaged
12099 * representation of the vtype to the stack.
12104 token = read32 (ip + 2);
12105 klass = mono_method_get_wrapper_data (method, token);
12106 g_assert (klass->valuetype);
12107 mono_class_init (klass);
12110 MonoInst *src, *dest, *temp;
12113 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12114 temp->backend.is_pinvoke = 1;
12115 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12116 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12118 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12119 dest->type = STACK_VTYPE;
12120 dest->klass = klass;
12126 case CEE_MONO_RETOBJ: {
12128 * Same as RET, but return the native representation of a vtype
12131 g_assert (cfg->ret);
12132 g_assert (mono_method_signature (method)->pinvoke);
12137 token = read32 (ip + 2);
12138 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12140 if (!cfg->vret_addr) {
12141 g_assert (cfg->ret_var_is_local);
12143 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12145 EMIT_NEW_RETLOADA (cfg, ins);
12147 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12149 if (sp != stack_start)
12152 MONO_INST_NEW (cfg, ins, OP_BR);
12153 ins->inst_target_bb = end_bblock;
12154 MONO_ADD_INS (bblock, ins);
12155 link_bblock (cfg, bblock, end_bblock);
12156 start_new_bblock = 1;
12160 case CEE_MONO_CISINST:
12161 case CEE_MONO_CCASTCLASS: {
12166 token = read32 (ip + 2);
12167 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12168 if (ip [1] == CEE_MONO_CISINST)
12169 ins = handle_cisinst (cfg, klass, sp [0]);
12171 ins = handle_ccastclass (cfg, klass, sp [0]);
12177 case CEE_MONO_SAVE_LMF:
12178 case CEE_MONO_RESTORE_LMF:
12179 #ifdef MONO_ARCH_HAVE_LMF_OPS
12180 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12181 MONO_ADD_INS (bblock, ins);
12182 cfg->need_lmf_area = TRUE;
12186 case CEE_MONO_CLASSCONST:
12187 CHECK_STACK_OVF (1);
12189 token = read32 (ip + 2);
12190 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12193 inline_costs += 10 * num_calls++;
12195 case CEE_MONO_NOT_TAKEN:
12196 bblock->out_of_line = TRUE;
12199 case CEE_MONO_TLS: {
12202 CHECK_STACK_OVF (1);
12204 key = (gint32)read32 (ip + 2);
12205 g_assert (key < TLS_KEY_NUM);
12207 ins = mono_create_tls_get (cfg, key);
12209 if (cfg->compile_aot) {
12211 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12212 ins->dreg = alloc_preg (cfg);
12213 ins->type = STACK_PTR;
12215 g_assert_not_reached ();
12218 ins->type = STACK_PTR;
12219 MONO_ADD_INS (bblock, ins);
12224 case CEE_MONO_DYN_CALL: {
12225 MonoCallInst *call;
12227 /* It would be easier to call a trampoline, but that would put an
12228 * extra frame on the stack, confusing exception handling. So
12229 * implement it inline using an opcode for now.
12232 if (!cfg->dyn_call_var) {
12233 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12234 /* prevent it from being register allocated */
12235 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12238 /* Has to use a call inst since it local regalloc expects it */
12239 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12240 ins = (MonoInst*)call;
12242 ins->sreg1 = sp [0]->dreg;
12243 ins->sreg2 = sp [1]->dreg;
12244 MONO_ADD_INS (bblock, ins);
12246 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12249 inline_costs += 10 * num_calls++;
12253 case CEE_MONO_MEMORY_BARRIER: {
12255 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12259 case CEE_MONO_JIT_ATTACH: {
12260 MonoInst *args [16], *domain_ins;
12261 MonoInst *ad_ins, *jit_tls_ins;
12262 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12264 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12266 EMIT_NEW_PCONST (cfg, ins, NULL);
12267 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12269 ad_ins = mono_get_domain_intrinsic (cfg);
12270 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12272 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12273 NEW_BBLOCK (cfg, next_bb);
12274 NEW_BBLOCK (cfg, call_bb);
12276 if (cfg->compile_aot) {
12277 /* AOT code is only used in the root domain */
12278 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12280 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12282 MONO_ADD_INS (cfg->cbb, ad_ins);
12283 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12286 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12288 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12290 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12291 MONO_START_BB (cfg, call_bb);
12294 if (cfg->compile_aot) {
12295 /* AOT code is only used in the root domain */
12296 EMIT_NEW_PCONST (cfg, args [0], NULL);
12298 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12300 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12301 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12304 MONO_START_BB (cfg, next_bb);
12310 case CEE_MONO_JIT_DETACH: {
12311 MonoInst *args [16];
12313 /* Restore the original domain */
12314 dreg = alloc_ireg (cfg);
12315 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12316 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12321 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12327 case CEE_PREFIX1: {
12330 case CEE_ARGLIST: {
12331 /* somewhat similar to LDTOKEN */
12332 MonoInst *addr, *vtvar;
12333 CHECK_STACK_OVF (1);
12334 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12336 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12337 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12339 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12340 ins->type = STACK_VTYPE;
12341 ins->klass = mono_defaults.argumenthandle_class;
12351 MonoInst *cmp, *arg1, *arg2;
12359 * The following transforms:
12360 * CEE_CEQ into OP_CEQ
12361 * CEE_CGT into OP_CGT
12362 * CEE_CGT_UN into OP_CGT_UN
12363 * CEE_CLT into OP_CLT
12364 * CEE_CLT_UN into OP_CLT_UN
12366 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12368 MONO_INST_NEW (cfg, ins, cmp->opcode);
12369 cmp->sreg1 = arg1->dreg;
12370 cmp->sreg2 = arg2->dreg;
12371 type_from_op (cfg, cmp, arg1, arg2);
12373 add_widen_op (cfg, cmp, &arg1, &arg2);
12374 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12375 cmp->opcode = OP_LCOMPARE;
12376 else if (arg1->type == STACK_R4)
12377 cmp->opcode = OP_RCOMPARE;
12378 else if (arg1->type == STACK_R8)
12379 cmp->opcode = OP_FCOMPARE;
12381 cmp->opcode = OP_ICOMPARE;
12382 MONO_ADD_INS (bblock, cmp);
12383 ins->type = STACK_I4;
12384 ins->dreg = alloc_dreg (cfg, ins->type);
12385 type_from_op (cfg, ins, arg1, arg2);
12387 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12389 * The backends expect the fceq opcodes to do the
12392 ins->sreg1 = cmp->sreg1;
12393 ins->sreg2 = cmp->sreg2;
12396 MONO_ADD_INS (bblock, ins);
12402 MonoInst *argconst;
12403 MonoMethod *cil_method;
12405 CHECK_STACK_OVF (1);
12407 n = read32 (ip + 2);
12408 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12409 if (!cmethod || mono_loader_get_last_error ())
12411 mono_class_init (cmethod->klass);
12413 mono_save_token_info (cfg, image, n, cmethod);
12415 context_used = mini_method_check_context_used (cfg, cmethod);
12417 cil_method = cmethod;
12418 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12419 METHOD_ACCESS_FAILURE (method, cil_method);
12421 if (mono_security_cas_enabled ()) {
12422 if (check_linkdemand (cfg, method, cmethod))
12423 INLINE_FAILURE ("linkdemand");
12424 CHECK_CFG_EXCEPTION;
12425 } else if (mono_security_core_clr_enabled ()) {
12426 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12430 * Optimize the common case of ldftn+delegate creation
12432 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12433 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12434 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12435 MonoInst *target_ins, *handle_ins;
12436 MonoMethod *invoke;
12437 int invoke_context_used;
12439 invoke = mono_get_delegate_invoke (ctor_method->klass);
12440 if (!invoke || !mono_method_signature (invoke))
12443 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12445 target_ins = sp [-1];
12447 if (mono_security_core_clr_enabled ())
12448 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12450 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12451 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12452 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12454 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12458 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12459 /* FIXME: SGEN support */
12460 if (invoke_context_used == 0) {
12462 if (cfg->verbose_level > 3)
12463 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12464 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12467 CHECK_CFG_EXCEPTION;
12478 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12479 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12483 inline_costs += 10 * num_calls++;
12486 case CEE_LDVIRTFTN: {
12487 MonoInst *args [2];
12491 n = read32 (ip + 2);
12492 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12493 if (!cmethod || mono_loader_get_last_error ())
12495 mono_class_init (cmethod->klass);
12497 context_used = mini_method_check_context_used (cfg, cmethod);
12499 if (mono_security_cas_enabled ()) {
12500 if (check_linkdemand (cfg, method, cmethod))
12501 INLINE_FAILURE ("linkdemand");
12502 CHECK_CFG_EXCEPTION;
12503 } else if (mono_security_core_clr_enabled ()) {
12504 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12508 * Optimize the common case of ldvirtftn+delegate creation
12510 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12511 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12512 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12513 MonoInst *target_ins, *handle_ins;
12514 MonoMethod *invoke;
12515 int invoke_context_used;
12517 invoke = mono_get_delegate_invoke (ctor_method->klass);
12518 if (!invoke || !mono_method_signature (invoke))
12521 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12523 target_ins = sp [-1];
12525 if (mono_security_core_clr_enabled ())
12526 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12528 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12529 /* FIXME: SGEN support */
12530 if (invoke_context_used == 0) {
12532 if (cfg->verbose_level > 3)
12533 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12534 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12537 CHECK_CFG_EXCEPTION;
12551 args [1] = emit_get_rgctx_method (cfg, context_used,
12552 cmethod, MONO_RGCTX_INFO_METHOD);
12555 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12557 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12560 inline_costs += 10 * num_calls++;
12564 CHECK_STACK_OVF (1);
12566 n = read16 (ip + 2);
12568 EMIT_NEW_ARGLOAD (cfg, ins, n);
12573 CHECK_STACK_OVF (1);
12575 n = read16 (ip + 2);
12577 NEW_ARGLOADA (cfg, ins, n);
12578 MONO_ADD_INS (cfg->cbb, ins);
12586 n = read16 (ip + 2);
12588 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12590 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12594 CHECK_STACK_OVF (1);
12596 n = read16 (ip + 2);
12598 EMIT_NEW_LOCLOAD (cfg, ins, n);
12603 unsigned char *tmp_ip;
12604 CHECK_STACK_OVF (1);
12606 n = read16 (ip + 2);
12609 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12615 EMIT_NEW_LOCLOADA (cfg, ins, n);
12624 n = read16 (ip + 2);
12626 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12628 emit_stloc_ir (cfg, sp, header, n);
12635 if (sp != stack_start)
12637 if (cfg->method != method)
12639 * Inlining this into a loop in a parent could lead to
12640 * stack overflows which is different behavior than the
12641 * non-inlined case, thus disable inlining in this case.
12643 INLINE_FAILURE("localloc");
12645 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12646 ins->dreg = alloc_preg (cfg);
12647 ins->sreg1 = sp [0]->dreg;
12648 ins->type = STACK_PTR;
12649 MONO_ADD_INS (cfg->cbb, ins);
12651 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12653 ins->flags |= MONO_INST_INIT;
12658 case CEE_ENDFILTER: {
12659 MonoExceptionClause *clause, *nearest;
12660 int cc, nearest_num;
12664 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12666 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12667 ins->sreg1 = (*sp)->dreg;
12668 MONO_ADD_INS (bblock, ins);
12669 start_new_bblock = 1;
12674 for (cc = 0; cc < header->num_clauses; ++cc) {
12675 clause = &header->clauses [cc];
12676 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12677 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12678 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12683 g_assert (nearest);
12684 if ((ip - header->code) != nearest->handler_offset)
12689 case CEE_UNALIGNED_:
12690 ins_flag |= MONO_INST_UNALIGNED;
12691 /* FIXME: record alignment? we can assume 1 for now */
12695 case CEE_VOLATILE_:
12696 ins_flag |= MONO_INST_VOLATILE;
12700 ins_flag |= MONO_INST_TAILCALL;
12701 cfg->flags |= MONO_CFG_HAS_TAIL;
12702 /* Can't inline tail calls at this time */
12703 inline_costs += 100000;
12710 token = read32 (ip + 2);
12711 klass = mini_get_class (method, token, generic_context);
12712 CHECK_TYPELOAD (klass);
12713 if (generic_class_is_reference_type (cfg, klass))
12714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12716 mini_emit_initobj (cfg, *sp, NULL, klass);
12720 case CEE_CONSTRAINED_:
12722 token = read32 (ip + 2);
12723 constrained_call = mini_get_class (method, token, generic_context);
12724 CHECK_TYPELOAD (constrained_call);
12728 case CEE_INITBLK: {
12729 MonoInst *iargs [3];
12733 /* Skip optimized paths for volatile operations. */
12734 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12735 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12736 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12737 /* emit_memset only works when val == 0 */
12738 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12741 iargs [0] = sp [0];
12742 iargs [1] = sp [1];
12743 iargs [2] = sp [2];
12744 if (ip [1] == CEE_CPBLK) {
12746 * FIXME: It's unclear whether we should be emitting both the acquire
12747 * and release barriers for cpblk. It is technically both a load and
12748 * store operation, so it seems like that's the sensible thing to do.
12750 * FIXME: We emit full barriers on both sides of the operation for
12751 * simplicity. We should have a separate atomic memcpy method instead.
12753 MonoMethod *memcpy_method = get_memcpy_method ();
12755 if (ins_flag & MONO_INST_VOLATILE)
12756 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12758 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12759 call->flags |= ins_flag;
12761 if (ins_flag & MONO_INST_VOLATILE)
12762 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12764 MonoMethod *memset_method = get_memset_method ();
12765 if (ins_flag & MONO_INST_VOLATILE) {
12766 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12767 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12769 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12770 call->flags |= ins_flag;
12781 ins_flag |= MONO_INST_NOTYPECHECK;
12783 ins_flag |= MONO_INST_NORANGECHECK;
12784 /* we ignore the no-nullcheck for now since we
12785 * really do it explicitly only when doing callvirt->call
12789 case CEE_RETHROW: {
12791 int handler_offset = -1;
12793 for (i = 0; i < header->num_clauses; ++i) {
12794 MonoExceptionClause *clause = &header->clauses [i];
12795 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12796 handler_offset = clause->handler_offset;
12801 bblock->flags |= BB_EXCEPTION_UNSAFE;
12803 if (handler_offset == -1)
12806 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12807 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12808 ins->sreg1 = load->dreg;
12809 MONO_ADD_INS (bblock, ins);
12811 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12812 MONO_ADD_INS (bblock, ins);
12815 link_bblock (cfg, bblock, end_bblock);
12816 start_new_bblock = 1;
12824 CHECK_STACK_OVF (1);
12826 token = read32 (ip + 2);
12827 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12828 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12831 val = mono_type_size (type, &ialign);
12833 MonoClass *klass = mini_get_class (method, token, generic_context);
12834 CHECK_TYPELOAD (klass);
12836 val = mono_type_size (&klass->byval_arg, &ialign);
12838 if (mini_is_gsharedvt_klass (cfg, klass))
12839 GSHAREDVT_FAILURE (*ip);
12841 EMIT_NEW_ICONST (cfg, ins, val);
12846 case CEE_REFANYTYPE: {
12847 MonoInst *src_var, *src;
12849 GSHAREDVT_FAILURE (*ip);
12855 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12857 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12858 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12859 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12864 case CEE_READONLY_:
12877 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12887 g_warning ("opcode 0x%02x not handled", *ip);
12891 if (start_new_bblock != 1)
12894 bblock->cil_length = ip - bblock->cil_code;
12895 if (bblock->next_bb) {
12896 /* This could already be set because of inlining, #693905 */
12897 MonoBasicBlock *bb = bblock;
12899 while (bb->next_bb)
12901 bb->next_bb = end_bblock;
12903 bblock->next_bb = end_bblock;
12906 if (cfg->method == method && cfg->domainvar) {
12908 MonoInst *get_domain;
12910 cfg->cbb = init_localsbb;
12912 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12913 MONO_ADD_INS (cfg->cbb, get_domain);
12915 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12917 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12918 MONO_ADD_INS (cfg->cbb, store);
12921 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12922 if (cfg->compile_aot)
12923 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12924 mono_get_got_var (cfg);
12927 if (cfg->method == method && cfg->got_var)
12928 mono_emit_load_got_addr (cfg);
12930 if (init_localsbb) {
12931 cfg->cbb = init_localsbb;
12933 for (i = 0; i < header->num_locals; ++i) {
12934 emit_init_local (cfg, i, header->locals [i], init_locals);
12938 if (cfg->init_ref_vars && cfg->method == method) {
12939 /* Emit initialization for ref vars */
12940 // FIXME: Avoid duplication initialization for IL locals.
12941 for (i = 0; i < cfg->num_varinfo; ++i) {
12942 MonoInst *ins = cfg->varinfo [i];
12944 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12945 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12949 if (cfg->lmf_var && cfg->method == method) {
12950 cfg->cbb = init_localsbb;
12951 emit_push_lmf (cfg);
12954 cfg->cbb = init_localsbb;
12955 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12958 MonoBasicBlock *bb;
12961 * Make seq points at backward branch targets interruptable.
12963 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12964 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12965 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12968 /* Add a sequence point for method entry/exit events */
12969 if (cfg->gen_seq_points_debug_data) {
12970 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12971 MONO_ADD_INS (init_localsbb, ins);
12972 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12973 MONO_ADD_INS (cfg->bb_exit, ins);
12977 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12978 * the code they refer to was dead (#11880).
12980 if (sym_seq_points) {
12981 for (i = 0; i < header->code_size; ++i) {
12982 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12985 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12986 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12993 if (cfg->method == method) {
12994 MonoBasicBlock *bb;
12995 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12996 bb->region = mono_find_block_region (cfg, bb->real_offset);
12998 mono_create_spvar_for_region (cfg, bb->region);
12999 if (cfg->verbose_level > 2)
13000 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13004 if (inline_costs < 0) {
13007 /* Method is too large */
13008 mname = mono_method_full_name (method, TRUE);
13009 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13010 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13014 if ((cfg->verbose_level > 2) && (cfg->method == method))
13015 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13020 g_assert (!mono_error_ok (&cfg->error));
13024 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13028 set_exception_type_from_invalid_il (cfg, method, ip);
13032 g_slist_free (class_inits);
13033 mono_basic_block_free (original_bb);
13034 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13035 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13036 if (cfg->exception_type)
13039 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * for use when the value being stored is known to be a constant.
 * Asserts on any store opcode that has no immediate form.
 * (NOTE(review): this view of the file is elided — switch/brace lines are
 * missing between the visible lines.)
 */
13043 store_membase_reg_to_store_membase_imm (int opcode)
13046 case OP_STORE_MEMBASE_REG:
13047 return OP_STORE_MEMBASE_IMM;
13048 case OP_STOREI1_MEMBASE_REG:
13049 return OP_STOREI1_MEMBASE_IMM;
13050 case OP_STOREI2_MEMBASE_REG:
13051 return OP_STOREI2_MEMBASE_IMM;
13052 case OP_STOREI4_MEMBASE_REG:
13053 return OP_STOREI4_MEMBASE_IMM;
13054 case OP_STOREI8_MEMBASE_REG:
13055 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode has no immediate variant */
13057 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a three-address opcode to its two-address immediate form (REG op REG
 * -> REG op IMM), used when one operand is a constant.  Covers integer and
 * long ALU/shift ops, compares, membase stores, and some x86/amd64-specific
 * opcodes.  (NOTE(review): the matching `case` labels for most of the
 * `return` lines below are elided in this view of the file.)
 */
13064 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU / shift opcodes */
13068 return OP_IADD_IMM;
13070 return OP_ISUB_IMM;
13072 return OP_IDIV_IMM;
13074 return OP_IDIV_UN_IMM;
13076 return OP_IREM_IMM;
13078 return OP_IREM_UN_IMM;
13080 return OP_IMUL_IMM;
13082 return OP_IAND_IMM;
13086 return OP_IXOR_IMM;
13088 return OP_ISHL_IMM;
13090 return OP_ISHR_IMM;
13092 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU / shift opcodes */
13095 return OP_LADD_IMM;
13097 return OP_LSUB_IMM;
13099 return OP_LAND_IMM;
13103 return OP_LXOR_IMM;
13105 return OP_LSHL_IMM;
13107 return OP_LSHR_IMM;
13109 return OP_LSHR_UN_IMM;
/* LREM_IMM only exists natively on 64-bit registers */
13110 #if SIZEOF_REGISTER == 8
13112 return OP_LREM_IMM;
/* Compare opcodes */
13116 return OP_COMPARE_IMM;
13118 return OP_ICOMPARE_IMM;
13120 return OP_LCOMPARE_IMM;
/* Membase stores: same mapping as store_membase_reg_to_store_membase_imm () */
13122 case OP_STORE_MEMBASE_REG:
13123 return OP_STORE_MEMBASE_IMM;
13124 case OP_STOREI1_MEMBASE_REG:
13125 return OP_STOREI1_MEMBASE_IMM;
13126 case OP_STOREI2_MEMBASE_REG:
13127 return OP_STOREI2_MEMBASE_IMM;
13128 case OP_STOREI4_MEMBASE_REG:
13129 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms */
13131 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13133 return OP_X86_PUSH_IMM;
13134 case OP_X86_COMPARE_MEMBASE_REG:
13135 return OP_X86_COMPARE_MEMBASE_IMM;
13137 #if defined(TARGET_AMD64)
13138 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13139 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant address becomes a direct call */
13141 case OP_VOIDCALL_REG:
13142 return OP_VOIDCALL;
13150 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the corresponding LOAD*_MEMBASE IR opcode.
 * Both LDIND_I (native int) and LDIND_REF map to the pointer-sized
 * OP_LOAD_MEMBASE.  Asserts on anything that is not an ldind opcode.
 * (NOTE(review): most `case CEE_LDIND_*` labels are elided in this view.)
 */
13157 ldind_to_load_membase (int opcode)
13161 return OP_LOADI1_MEMBASE;
13163 return OP_LOADU1_MEMBASE;
13165 return OP_LOADI2_MEMBASE;
13167 return OP_LOADU2_MEMBASE;
13169 return OP_LOADI4_MEMBASE;
13171 return OP_LOADU4_MEMBASE;
13173 return OP_LOAD_MEMBASE;
/* Object references load as a full pointer-sized word, same as native int */
13174 case CEE_LDIND_REF:
13175 return OP_LOAD_MEMBASE;
13177 return OP_LOADI8_MEMBASE;
13179 return OP_LOADR4_MEMBASE;
13181 return OP_LOADR8_MEMBASE;
13183 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the corresponding STORE*_MEMBASE_REG IR
 * opcode.  STIND_REF maps to the pointer-sized OP_STORE_MEMBASE_REG.
 * Asserts on anything that is not an stind opcode.
 * (NOTE(review): most `case CEE_STIND_*` labels are elided in this view.)
 */
13190 stind_to_store_membase (int opcode)
13194 return OP_STOREI1_MEMBASE_REG;
13196 return OP_STOREI2_MEMBASE_REG;
13198 return OP_STOREI4_MEMBASE_REG;
/* Object references store a full pointer-sized word */
13200 case CEE_STIND_REF:
13201 return OP_STORE_MEMBASE_REG;
13203 return OP_STOREI8_MEMBASE_REG;
13205 return OP_STORER4_MEMBASE_REG;
13207 return OP_STORER8_MEMBASE_REG;
13209 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE (base register + offset) opcode to the corresponding
 * absolute-address LOAD*_MEM opcode, for use when the address is a constant.
 * Only x86/amd64 support these opcodes; 8-byte loads additionally require a
 * 64-bit register.  (NOTE(review): lines are elided in this view — the
 * fallthrough return for unsupported targets/opcodes is not visible.)
 */
13216 mono_load_membase_to_load_mem (int opcode)
13218 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13219 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13221 case OP_LOAD_MEMBASE:
13222 return OP_LOAD_MEM;
13223 case OP_LOADU1_MEMBASE:
13224 return OP_LOADU1_MEM;
13225 case OP_LOADU2_MEMBASE:
13226 return OP_LOADU2_MEM;
13227 case OP_LOADI4_MEMBASE:
13228 return OP_LOADI4_MEM;
13229 case OP_LOADU4_MEMBASE:
13230 return OP_LOADU4_MEM;
13231 #if SIZEOF_REGISTER == 8
13232 case OP_LOADI8_MEMBASE:
13233 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Peephole helper: given an ALU opcode whose result is immediately stored
 * back with STORE_OPCODE, return an x86/amd64 read-modify-write opcode that
 * operates directly on the memory destination (e.g. ADD -> ADD_MEMBASE_REG).
 * Returns early (fallthrough elided in this view) when the store is not a
 * plain register store of the right width for the target.
 * (NOTE(review): the `case` labels matched to the `return` lines below are
 * elided in this view of the file.)
 */
13242 op_to_op_dest_membase (int store_opcode, int opcode)
13244 #if defined(TARGET_X86)
/* x86: only 32-bit / pointer-sized register stores can be fused */
13245 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13250 return OP_X86_ADD_MEMBASE_REG;
13252 return OP_X86_SUB_MEMBASE_REG;
13254 return OP_X86_AND_MEMBASE_REG;
13256 return OP_X86_OR_MEMBASE_REG;
13258 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-operand variants */
13261 return OP_X86_ADD_MEMBASE_IMM;
13264 return OP_X86_SUB_MEMBASE_IMM;
13267 return OP_X86_AND_MEMBASE_IMM;
13270 return OP_X86_OR_MEMBASE_IMM;
13273 return OP_X86_XOR_MEMBASE_IMM;
13279 #if defined(TARGET_AMD64)
/* amd64: 32-bit, 64-bit and pointer-sized register stores can be fused */
13280 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ALU ops reuse the X86_* membase opcodes */
13285 return OP_X86_ADD_MEMBASE_REG;
13287 return OP_X86_SUB_MEMBASE_REG;
13289 return OP_X86_AND_MEMBASE_REG;
13291 return OP_X86_OR_MEMBASE_REG;
13293 return OP_X86_XOR_MEMBASE_REG;
13295 return OP_X86_ADD_MEMBASE_IMM;
13297 return OP_X86_SUB_MEMBASE_IMM;
13299 return OP_X86_AND_MEMBASE_IMM;
13301 return OP_X86_OR_MEMBASE_IMM;
13303 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ALU ops use the AMD64_* membase opcodes */
13305 return OP_AMD64_ADD_MEMBASE_REG;
13307 return OP_AMD64_SUB_MEMBASE_REG;
13309 return OP_AMD64_AND_MEMBASE_REG;
13311 return OP_AMD64_OR_MEMBASE_REG;
13313 return OP_AMD64_XOR_MEMBASE_REG;
13316 return OP_AMD64_ADD_MEMBASE_IMM;
13319 return OP_AMD64_SUB_MEMBASE_IMM;
13322 return OP_AMD64_AND_MEMBASE_IMM;
13325 return OP_AMD64_OR_MEMBASE_IMM;
13328 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Peephole helper: fuse a compare-result opcode with a following 1-byte
 * register store into a single x86/amd64 SETcc-to-memory opcode
 * (SETEQ_MEMBASE / SETNE_MEMBASE).  The `case` labels for the two
 * conditions are elided in this view; the non-matching fallthrough is also
 * not visible here.
 */
13338 op_to_op_store_membase (int store_opcode, int opcode)
13340 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13343 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13344 return OP_X86_SETEQ_MEMBASE;
13346 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13347 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Peephole helper: given an opcode whose FIRST source operand comes from a
 * preceding LOAD_OPCODE, return an x86/amd64 opcode that reads that operand
 * directly from memory (e.g. compare-with-membase, push-from-membase),
 * eliminating the intermediate register.  Non-fusable combinations fall
 * through (elided in this view).
 */
13355 op_to_op_src1_membase (int load_opcode, int opcode)
/* FIXME: This has sign extension issues */
13358 /* FIXME: This has sign extension issues */
13360 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13361 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be fused */
13364 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13369 return OP_X86_PUSH_MEMBASE;
13370 case OP_COMPARE_IMM:
13371 case OP_ICOMPARE_IMM:
13372 return OP_X86_COMPARE_MEMBASE_IMM;
13375 return OP_X86_COMPARE_MEMBASE_REG;
13379 #ifdef TARGET_AMD64
13380 /* FIXME: This has sign extension issues */
13382 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13383 return OP_X86_COMPARE_MEMBASE8_IMM;
/* ilp32 (x32): OP_LOAD_MEMBASE is 4 bytes, so I8 loads can't be fused here */
13388 #ifdef __mono_ilp32__
13389 if (load_opcode == OP_LOADI8_MEMBASE)
13391 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13393 return OP_X86_PUSH_MEMBASE;
/* Deliberately disabled: the membase-imm form only encodes 32-bit immediates */
13395 /* FIXME: This only works for 32 bit immediates
13396 case OP_COMPARE_IMM:
13397 case OP_LCOMPARE_IMM:
13398 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13399 return OP_AMD64_COMPARE_MEMBASE_IMM;
13401 case OP_ICOMPARE_IMM:
13402 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13403 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* ilp32: pointer-sized load pairs with the 32-bit compare */
13407 #ifdef __mono_ilp32__
13408 if (load_opcode == OP_LOAD_MEMBASE)
13409 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13410 if (load_opcode == OP_LOADI8_MEMBASE)
13412 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13414 return OP_AMD64_COMPARE_MEMBASE_REG;
13417 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13418 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Peephole helper: given an opcode whose SECOND source operand comes from
 * a preceding LOAD_OPCODE, return an x86/amd64 reg-op-membase opcode
 * (e.g. ADD_REG_MEMBASE) so the load is folded into the ALU instruction.
 * Non-fusable combinations fall through (elided in this view).
 */
13427 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be fused */
13430 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13436 return OP_X86_COMPARE_REG_MEMBASE;
13438 return OP_X86_ADD_REG_MEMBASE;
13440 return OP_X86_SUB_REG_MEMBASE;
13442 return OP_X86_AND_REG_MEMBASE;
13444 return OP_X86_OR_REG_MEMBASE;
13446 return OP_X86_XOR_REG_MEMBASE;
13450 #ifdef TARGET_AMD64
/* ilp32: pointer-sized loads count as 32-bit here */
13451 #ifdef __mono_ilp32__
13452 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13454 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit ops: reuse the X86_* reg-membase opcodes */
13458 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13460 return OP_X86_ADD_REG_MEMBASE;
13462 return OP_X86_SUB_REG_MEMBASE;
13464 return OP_X86_AND_REG_MEMBASE;
13466 return OP_X86_OR_REG_MEMBASE;
13468 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit loads: use the AMD64_* reg-membase opcodes */
13470 #ifdef __mono_ilp32__
13471 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13473 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13478 return OP_AMD64_COMPARE_REG_MEMBASE;
13480 return OP_AMD64_ADD_REG_MEMBASE;
13482 return OP_AMD64_SUB_REG_MEMBASE;
13484 return OP_AMD64_AND_REG_MEMBASE;
13486 return OP_AMD64_OR_REG_MEMBASE;
13488 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (the refusal
 * branches are elided in this view) for opcodes that the current
 * architecture emulates in software — long shifts on 32-bit targets,
 * and mul/div/rem when MONO_ARCH_EMULATE_* is defined — since the
 * emulation helpers have no immediate form.
 */
13497 mono_op_to_op_imm_noemul (int opcode)
13500 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13506 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13513 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can use the normal immediate mapping */
13518 return mono_op_to_op_imm (opcode);
13523 * mono_handle_global_vregs:
13525 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13529 mono_handle_global_vregs (MonoCompile *cfg)
13531 gint32 *vreg_to_bb;
13532 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> 0 (unseen), block_num + 1 (seen in exactly one
 * bblock so far), or -1 (seen in multiple bblocks / already global).
 * NOTE(review): the element size here is sizeof (gint32*) — pointer size —
 * for a gint32 array, and "+ 1" adds one byte, not one element.  Harmless
 * over-allocation on 64-bit, but looks unintended; confirm against upstream.
 */
13535 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13537 #ifdef MONO_ARCH_SIMD_INTRINSICS
13538 if (cfg->uses_simd_intrinsics)
13539 mono_simd_simplify_indirection (cfg);
13542 /* Find local vregs used in more than one bb */
13543 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13544 MonoInst *ins = bb->code;
13545 int block_num = bb->block_num;
13547 if (cfg->verbose_level > 2)
13548 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13551 for (; ins; ins = ins->next) {
13552 const char *spec = INS_INFO (ins->opcode);
13553 int regtype = 0, regindex;
13556 if (G_UNLIKELY (cfg->verbose_level > 2))
13557 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
13559 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit each register slot of the instruction: 0=dest, 1-3=sources */
13561 for (regindex = 0; regindex < 4; regindex ++) {
13564 if (regindex == 0) {
13565 regtype = spec [MONO_INST_DEST];
13566 if (regtype == ' ')
13569 } else if (regindex == 1) {
13570 regtype = spec [MONO_INST_SRC1];
13571 if (regtype == ' ')
13574 } else if (regindex == 2) {
13575 regtype = spec [MONO_INST_SRC2];
13576 if (regtype == ' ')
13579 } else if (regindex == 3) {
13580 regtype = spec [MONO_INST_SRC3];
13581 if (regtype == ' ')
13586 #if SIZEOF_REGISTER == 4
13587 /* In the LLVM case, the long opcodes are not decomposed */
13588 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13590 * Since some instructions reference the original long vreg,
13591 * and some reference the two component vregs, it is quite hard
13592 * to determine when it needs to be global. So be conservative.
13594 if (!get_vreg_to_inst (cfg, vreg)) {
13595 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13597 if (cfg->verbose_level > 2)
13598 printf ("LONG VREG R%d made global.\n", vreg);
13602 * Make the component vregs volatile since the optimizations can
13603 * get confused otherwise.
/* On 32-bit, a long vreg N is split into component vregs N+1 (low) and N+2 (high) */
13605 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13606 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13610 g_assert (vreg != -1);
13612 prev_bb = vreg_to_bb [vreg];
13613 if (prev_bb == 0) {
13614 /* 0 is a valid block num */
13615 vreg_to_bb [vreg] = block_num + 1;
13616 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are handled by the register allocator, skip them */
13617 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13620 if (!get_vreg_to_inst (cfg, vreg)) {
13621 if (G_UNLIKELY (cfg->verbose_level > 2))
/* note: vreg_to_bb [vreg] here is prev_bb's encoded value (block_num + 1), not a raw block num */
13622 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a global MonoInst variable of the appropriate type for this vreg */
13626 if (vreg_is_ref (cfg, vreg))
13627 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13629 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13632 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13635 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13638 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13641 g_assert_not_reached ();
13645 /* Flag as having been used in more than one bb */
13646 vreg_to_bb [vreg] = -1;
13652 /* If a variable is used in only one bblock, convert it into a local vreg */
13653 for (i = 0; i < cfg->num_varinfo; i++) {
13654 MonoInst *var = cfg->varinfo [i];
13655 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13657 switch (var->type) {
13663 #if SIZEOF_REGISTER == 8
13666 #if !defined(TARGET_X86)
13667 /* Enabling this screws up the fp stack on x86 */
13670 if (mono_arch_is_soft_float ())
13673 /* Arguments are implicitly global */
13674 /* Putting R4 vars into registers doesn't work currently */
13675 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13676 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13678 * Make that the variable's liveness interval doesn't contain a call, since
13679 * that would cause the lvreg to be spilled, making the whole optimization
13682 /* This is too slow for JIT compilation */
13684 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13686 int def_index, call_index, ins_index;
13687 gboolean spilled = FALSE;
/* Scan the single bblock this var lives in for a call inside its live range */
13692 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13693 const char *spec = INS_INFO (ins->opcode);
13695 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13696 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * presumably intended SRC2/sreg2, so uses via the second source operand
 * are currently missed.  Confirm against upstream before fixing.
 */
13698 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13699 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13700 if (call_index > def_index) {
13706 if (MONO_IS_CALL (ins))
13707 call_index = ins_index;
13717 if (G_UNLIKELY (cfg->verbose_level > 2))
13718 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: the variable becomes a plain local vreg, drop its MonoInst */
13719 var->flags |= MONO_INST_IS_DEAD;
13720 cfg->vreg_to_inst [var->dreg] = NULL;
13727 * Compress the varinfo and vars tables so the liveness computation is faster and
13728 * takes up less space.
13731 for (i = 0; i < cfg->num_varinfo; ++i) {
13732 MonoInst *var = cfg->varinfo [i];
13733 if (pos < i && cfg->locals_start == i)
13734 cfg->locals_start = pos;
13735 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Slide live entries down over dead ones, keeping indices (inst_c0/idx) in sync */
13737 cfg->varinfo [pos] = cfg->varinfo [i];
13738 cfg->varinfo [pos]->inst_c0 = pos;
13739 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13740 cfg->vars [pos].idx = pos;
13741 #if SIZEOF_REGISTER == 4
13742 if (cfg->varinfo [pos]->type == STACK_I8) {
13743 /* Modify the two component vars too */
13746 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13747 var1->inst_c0 = pos;
13748 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13749 var1->inst_c0 = pos;
13756 cfg->num_varinfo = pos;
13757 if (cfg->locals_start > cfg->num_varinfo)
13758 cfg->locals_start = cfg->num_varinfo;
13762 * mono_spill_global_vars:
13764 * Generate spill code for variables which are not allocated to registers,
13765 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13766 * code is generated which could be optimized by the local optimization passes.
13769 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13771 MonoBasicBlock *bb;
13773 int orig_next_vreg;
13774 guint32 *vreg_to_lvreg;
13776 guint32 i, lvregs_len;
13777 gboolean dest_has_lvreg = FALSE;
13778 guint32 stacktypes [128];
13779 MonoInst **live_range_start, **live_range_end;
13780 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13781 int *gsharedvt_vreg_to_idx = NULL;
13783 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec: store opcodes get their dreg/sreg2 swapped
 * below, and a matching spec with DEST cleared is synthesized into spec2
 * (see the MONO_IS_STORE_MEMBASE handling further down). */
13785 memset (spec2, 0, sizeof (spec2));
13787 /* FIXME: Move this function to mini.c */
/* Map the ins-spec register-type characters to stack types, used when
 * allocating fresh dregs/sregs for spill loads and stores. */
13788 stacktypes ['i'] = STACK_PTR;
13789 stacktypes ['l'] = STACK_I8;
13790 stacktypes ['f'] = STACK_R8;
13791 #ifdef MONO_ARCH_SIMD_INTRINSICS
13792 stacktypes ['x'] = STACK_VTYPE;
/* On 32 bit targets a 64 bit variable occupies two component vregs
 * (dreg + 1 = LS word, dreg + 2 = MS word); give each component its own
 * OP_REGOFFSET slot inside the parent variable's stack slot. */
13795 #if SIZEOF_REGISTER == 4
13796 /* Create MonoInsts for longs */
13797 for (i = 0; i < cfg->num_varinfo; i++) {
13798 MonoInst *ins = cfg->varinfo [i];
13800 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13801 switch (ins->type) {
13806 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13809 g_assert (ins->opcode == OP_REGOFFSET);
13811 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13813 tree->opcode = OP_REGOFFSET;
13814 tree->inst_basereg = ins->inst_basereg;
13815 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13817 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13819 tree->opcode = OP_REGOFFSET;
13820 tree->inst_basereg = ins->inst_basereg;
13821 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13831 if (cfg->compute_gc_maps) {
13832 /* registers need liveness info even for non-ref variables */
13833 for (i = 0; i < cfg->num_varinfo; i++) {
13834 MonoInst *ins = cfg->varinfo [i];
13836 if (ins->opcode == OP_REGVAR)
13837 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: record, per dreg, how a variable-sized value is located:
 * idx + 1 for locals stored in the gsharedvt locals area, -1 for
 * arguments passed by ref, 0 meaning "not a gsharedvt variable". */
13841 if (cfg->gsharedvt) {
13842 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13844 for (i = 0; i < cfg->num_varinfo; ++i) {
13845 MonoInst *ins = cfg->varinfo [i];
13848 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13849 if (i >= cfg->locals_start) {
13851 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13852 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13853 ins->opcode = OP_GSHAREDVT_LOCAL;
13854 ins->inst_imm = idx;
13857 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13858 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13864 /* FIXME: widening and truncation */
13867 * As an optimization, when a variable allocated to the stack is first loaded into
13868 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13869 * the variable again.
/* orig_next_vreg snapshots the vreg count before this pass allocates new
 * lvregs, so "vreg < orig_next_vreg" below identifies pre-existing vregs. */
13871 orig_next_vreg = cfg->next_vreg;
13872 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* NOTE(review): fixed-size cache of at most 1024 cached lvregs; the
 * g_assert (lvregs_len < 1024) checks further down guard the bound. */
13873 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13877 * These arrays contain the first and last instructions accessing a given
13879 * Since we emit bblocks in the same order we process them here, and we
13880 * don't split live ranges, these will precisely describe the live range of
13881 * the variable, i.e. the instruction range where a valid value can be found
13882 * in the variable's location.
13883 * The live range is computed using the liveness info computed by the liveness pass.
13884 * We can't use vmv->range, since that is an abstract live range, and we need
13885 * one which is instruction precise.
13886 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13888 /* FIXME: Only do this if debugging info is requested */
13889 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
13890 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13891 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13892 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13894 /* Add spill loads/stores */
13895 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13898 if (cfg->verbose_level > 2)
13899 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock, so reset it here. */
13901 /* Clear vreg_to_lvreg array */
13902 for (i = 0; i < lvregs_len; i++)
13903 vreg_to_lvreg [lvregs [i]] = 0;
13907 MONO_BB_FOR_EACH_INS (bb, ins) {
13908 const char *spec = INS_INFO (ins->opcode);
13909 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13910 gboolean store, no_lvreg;
13911 int sregs [MONO_MAX_SRC_REGS];
13913 if (G_UNLIKELY (cfg->verbose_level > 2))
13914 mono_print_ins (ins);
13916 if (ins->opcode == OP_NOP)
13920 * We handle LDADDR here as well, since it can only be decomposed
13921 * when variable addresses are known.
13923 if (ins->opcode == OP_LDADDR) {
13924 MonoInst *var = ins->inst_p0;
13926 if (var->opcode == OP_VTARG_ADDR) {
13927 /* Happens on SPARC/S390 where vtypes are passed by reference */
13928 MonoInst *vtaddr = var->inst_left;
13929 if (vtaddr->opcode == OP_REGVAR) {
13930 ins->opcode = OP_MOVE;
13931 ins->sreg1 = vtaddr->dreg;
13933 else if (var->inst_left->opcode == OP_REGOFFSET) {
13934 ins->opcode = OP_LOAD_MEMBASE;
13935 ins->inst_basereg = vtaddr->inst_basereg;
13936 ins->inst_offset = vtaddr->inst_offset;
13939 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13940 /* gsharedvt arg passed by ref */
13941 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13943 ins->opcode = OP_LOAD_MEMBASE;
13944 ins->inst_basereg = var->inst_basereg;
13945 ins->inst_offset = var->inst_offset;
13946 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13947 MonoInst *load, *load2, *load3;
13948 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13949 int reg1, reg2, reg3;
13950 MonoInst *info_var = cfg->gsharedvt_info_var;
13951 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13955 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13958 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13960 g_assert (info_var);
13961 g_assert (locals_var);
13963 /* Mark the instruction used to compute the locals var as used */
13964 cfg->gsharedvt_locals_var_ins = NULL;
13966 /* Load the offset */
13967 if (info_var->opcode == OP_REGOFFSET) {
13968 reg1 = alloc_ireg (cfg);
13969 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13970 } else if (info_var->opcode == OP_REGVAR) {
13972 reg1 = info_var->dreg;
13974 g_assert_not_reached ();
13976 reg2 = alloc_ireg (cfg);
13977 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13978 /* Load the locals area address */
13979 reg3 = alloc_ireg (cfg);
13980 if (locals_var->opcode == OP_REGOFFSET) {
13981 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13982 } else if (locals_var->opcode == OP_REGVAR) {
13983 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13985 g_assert_not_reached ();
13987 /* Compute the address */
13988 ins->opcode = OP_PADD;
/* Insert the loads before ins in dependency order: load, load2, load3. */
13992 mono_bblock_insert_before_ins (bb, ins, load3);
13993 mono_bblock_insert_before_ins (bb, load3, load2);
13995 mono_bblock_insert_before_ins (bb, load2, load);
13997 g_assert (var->opcode == OP_REGOFFSET);
13999 ins->opcode = OP_ADD_IMM;
14000 ins->sreg1 = var->inst_basereg;
14001 ins->inst_imm = var->inst_offset;
14004 *need_local_opts = TRUE;
14005 spec = INS_INFO (ins->opcode);
/* By this point only low-level opcodes should remain; a CIL-level opcode
 * surviving to here is a decomposition bug. */
14008 if (ins->opcode < MONO_CEE_LAST) {
14009 mono_print_ins (ins);
14010 g_assert_not_reached ();
14014 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14018 if (MONO_IS_STORE_MEMBASE (ins)) {
14019 tmp_reg = ins->dreg;
14020 ins->dreg = ins->sreg2;
14021 ins->sreg2 = tmp_reg;
14024 spec2 [MONO_INST_DEST] = ' ';
14025 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14026 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14027 spec2 [MONO_INST_SRC3] = ' ';
14029 } else if (MONO_IS_STORE_MEMINDEX (ins))
14030 g_assert_not_reached ();
14035 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14036 printf ("\t %.3s %d", spec, ins->dreg);
14037 num_sregs = mono_inst_get_src_registers (ins, sregs);
14038 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14039 printf (" %d", sregs [srcindex]);
/* DEST: if the result vreg maps to a global variable, either rename it to
 * the allocated hreg, fuse the store into the instruction, or emit an
 * explicit spill store after it. */
14046 regtype = spec [MONO_INST_DEST];
14047 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14050 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14051 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14052 MonoInst *store_ins;
14054 MonoInst *def_ins = ins;
14055 int dreg = ins->dreg; /* The original vreg */
14057 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14059 if (var->opcode == OP_REGVAR) {
14060 ins->dreg = var->dreg;
14061 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14063 * Instead of emitting a load+store, use a _membase opcode.
14065 g_assert (var->opcode == OP_REGOFFSET);
14066 if (ins->opcode == OP_MOVE) {
14070 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14071 ins->inst_basereg = var->inst_basereg;
14072 ins->inst_offset = var->inst_offset;
14075 spec = INS_INFO (ins->opcode);
14079 g_assert (var->opcode == OP_REGOFFSET);
14081 prev_dreg = ins->dreg;
14083 /* Invalidate any previous lvreg for this vreg */
14084 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float an R8 value is carried in integer registers, so the
 * spill store must be an integer store. */
14088 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14090 store_opcode = OP_STOREI8_MEMBASE_REG;
14093 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14095 #if SIZEOF_REGISTER != 8
14096 if (regtype == 'l') {
14097 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14098 mono_bblock_insert_after_ins (bb, ins, store_ins);
14099 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14100 mono_bblock_insert_after_ins (bb, ins, store_ins);
14101 def_ins = store_ins;
14106 g_assert (store_opcode != OP_STOREV_MEMBASE);
14108 /* Try to fuse the store into the instruction itself */
14109 /* FIXME: Add more instructions */
14110 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14111 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14112 ins->inst_imm = ins->inst_c0;
14113 ins->inst_destbasereg = var->inst_basereg;
14114 ins->inst_offset = var->inst_offset;
14115 spec = INS_INFO (ins->opcode);
14116 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14117 ins->opcode = store_opcode;
14118 ins->inst_destbasereg = var->inst_basereg;
14119 ins->inst_offset = var->inst_offset;
/* The instruction became a store: swap dreg/sreg2 and switch to the
 * synthesized store spec, as done for stores at the top of the loop. */
14123 tmp_reg = ins->dreg;
14124 ins->dreg = ins->sreg2;
14125 ins->sreg2 = tmp_reg;
14128 spec2 [MONO_INST_DEST] = ' ';
14129 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14130 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14131 spec2 [MONO_INST_SRC3] = ' ';
14133 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14134 // FIXME: The backends expect the base reg to be in inst_basereg
14135 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14137 ins->inst_basereg = var->inst_basereg;
14138 ins->inst_offset = var->inst_offset;
14139 spec = INS_INFO (ins->opcode);
14141 /* printf ("INS: "); mono_print_ins (ins); */
14142 /* Create a store instruction */
14143 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14145 /* Insert it after the instruction */
14146 mono_bblock_insert_after_ins (bb, ins, store_ins);
14148 def_ins = store_ins;
14151 * We can't assign ins->dreg to var->dreg here, since the
14152 * sregs could use it. So set a flag, and do it after
/* Don't cache values that live on the x86 fp stack, or variables that
 * are volatile or have their address taken. */
14155 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14156 dest_has_lvreg = TRUE;
/* Record the first definition point for the precise live range. */
14161 if (def_ins && !live_range_start [dreg]) {
14162 live_range_start [dreg] = def_ins;
14163 live_range_start_bb [dreg] = bb;
14166 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14169 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14170 tmp->inst_c1 = dreg;
14171 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREGS: for each source that maps to a global variable, rename to its
 * hreg, reuse a cached lvreg, fuse the load into the instruction, or
 * emit an explicit reload before it. */
14178 num_sregs = mono_inst_get_src_registers (ins, sregs);
14179 for (srcindex = 0; srcindex < 3; ++srcindex) {
14180 regtype = spec [MONO_INST_SRC1 + srcindex];
14181 sreg = sregs [srcindex];
14183 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14184 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14185 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14186 MonoInst *use_ins = ins;
14187 MonoInst *load_ins;
14188 guint32 load_opcode;
14190 if (var->opcode == OP_REGVAR) {
14191 sregs [srcindex] = var->dreg;
14192 //mono_inst_set_src_registers (ins, sregs);
14193 live_range_end [sreg] = use_ins;
14194 live_range_end_bb [sreg] = bb;
14196 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14199 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14200 /* var->dreg is a hreg */
14201 tmp->inst_c1 = sreg;
14202 mono_bblock_insert_after_ins (bb, ins, tmp);
14208 g_assert (var->opcode == OP_REGOFFSET);
14210 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14212 g_assert (load_opcode != OP_LOADV_MEMBASE);
14214 if (vreg_to_lvreg [sreg]) {
14215 g_assert (vreg_to_lvreg [sreg] != -1);
14217 /* The variable is already loaded to an lvreg */
14218 if (G_UNLIKELY (cfg->verbose_level > 2))
14219 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14220 sregs [srcindex] = vreg_to_lvreg [sreg];
14221 //mono_inst_set_src_registers (ins, sregs);
14225 /* Try to fuse the load into the instruction */
14226 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14227 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14228 sregs [0] = var->inst_basereg;
14229 //mono_inst_set_src_registers (ins, sregs);
14230 ins->inst_offset = var->inst_offset;
14231 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14232 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14233 sregs [1] = var->inst_basereg;
14234 //mono_inst_set_src_registers (ins, sregs);
14235 ins->inst_offset = var->inst_offset;
14237 if (MONO_IS_REAL_MOVE (ins)) {
14238 ins->opcode = OP_NOP;
14241 //printf ("%d ", srcindex); mono_print_ins (ins);
14243 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value in an lvreg when safe: not on the fp
 * stack, not volatile/indirect, and caching not disabled for this ins. */
14245 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14246 if (var->dreg == prev_dreg) {
14248 * sreg refers to the value loaded by the load
14249 * emitted below, but we need to use ins->dreg
14250 * since it refers to the store emitted earlier.
14254 g_assert (sreg != -1);
14255 vreg_to_lvreg [var->dreg] = sreg;
14256 g_assert (lvregs_len < 1024);
14257 lvregs [lvregs_len ++] = var->dreg;
14261 sregs [srcindex] = sreg;
14262 //mono_inst_set_src_registers (ins, sregs);
14264 #if SIZEOF_REGISTER != 8
14265 if (regtype == 'l') {
14266 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14267 mono_bblock_insert_before_ins (bb, ins, load_ins);
14268 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14269 mono_bblock_insert_before_ins (bb, ins, load_ins);
14270 use_ins = load_ins;
14275 #if SIZEOF_REGISTER == 4
14276 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14278 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14279 mono_bblock_insert_before_ins (bb, ins, load_ins);
14280 use_ins = load_ins;
14284 if (var->dreg < orig_next_vreg) {
14285 live_range_end [var->dreg] = use_ins;
14286 live_range_end_bb [var->dreg] = bb;
14289 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14292 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14293 tmp->inst_c1 = var->dreg;
14294 mono_bblock_insert_after_ins (bb, ins, tmp);
14298 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DEST handling above: now that the sregs have been
 * processed, the result lvreg can be cached for this variable. */
14300 if (dest_has_lvreg) {
14301 g_assert (ins->dreg != -1);
14302 vreg_to_lvreg [prev_dreg] = ins->dreg;
14303 g_assert (lvregs_len < 1024);
14304 lvregs [lvregs_len ++] = prev_dreg;
14305 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap performed for store opcodes earlier. */
14309 tmp_reg = ins->dreg;
14310 ins->dreg = ins->sreg2;
14311 ins->sreg2 = tmp_reg;
/* A call can clobber any cached lvreg, so flush the whole cache. */
14314 if (MONO_IS_CALL (ins)) {
14315 /* Clear vreg_to_lvreg array */
14316 for (i = 0; i < lvregs_len; i++)
14317 vreg_to_lvreg [lvregs [i]] = 0;
14319 } else if (ins->opcode == OP_NOP) {
14321 MONO_INST_NULLIFY_SREGS (ins);
14324 if (cfg->verbose_level > 2)
14325 mono_print_ins_index (1, ins);
14328 /* Extend the live range based on the liveness info */
14329 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14330 for (i = 0; i < cfg->num_varinfo; i ++) {
14331 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14333 if (vreg_is_volatile (cfg, vi->vreg))
14334 /* The liveness info is incomplete */
14337 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14338 /* Live from at least the first ins of this bb */
14339 live_range_start [vi->vreg] = bb->code;
14340 live_range_start_bb [vi->vreg] = bb;
14343 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14344 /* Live at least until the last ins of this bb */
14345 live_range_end [vi->vreg] = bb->last_ins;
14346 live_range_end_bb [vi->vreg] = bb;
14352 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14354 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14355 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14357 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14358 for (i = 0; i < cfg->num_varinfo; ++i) {
14359 int vreg = MONO_VARINFO (cfg, i)->vreg;
14362 if (live_range_start [vreg]) {
14363 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14365 ins->inst_c1 = vreg;
14366 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14368 if (live_range_end [vreg]) {
14369 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14371 ins->inst_c1 = vreg;
14372 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14373 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14375 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14381 if (cfg->gsharedvt_locals_var_ins) {
14382 /* Nullify if unused */
14383 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14384 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14387 g_free (live_range_start);
14388 g_free (live_range_end);
14389 g_free (live_range_start_bb);
14390 g_free (live_range_end_bb);
14395 * - use 'iadd' instead of 'int_add'
14396 * - handling ovf opcodes: decompose in method_to_ir.
14397 * - unify iregs/fregs
14398 * -> partly done, the missing parts are:
14399 * - a more complete unification would involve unifying the hregs as well, so
14400 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14401 * would no longer map to the machine hregs, so the code generators would need to
14402 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14403 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14404 * fp/non-fp branches speeds it up by about 15%.
14405 * - use sext/zext opcodes instead of shifts
14407 * - get rid of TEMPLOADs if possible and use vregs instead
14408 * - clean up usage of OP_P/OP_ opcodes
14409 * - cleanup usage of DUMMY_USE
14410 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14412 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14413 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14414 * - make sure handle_stack_args () is called before the branch is emitted
14415 * - when the new IR is done, get rid of all unused stuff
14416 * - COMPARE/BEQ as separate instructions or unify them ?
14417 * - keeping them separate allows specialized compare instructions like
14418 * compare_imm, compare_membase
14419 * - most back ends unify fp compare+branch, fp compare+ceq
14420 * - integrate mono_save_args into inline_method
14421 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14422 * - handle long shift opts on 32 bit platforms somehow: they require
14423 * 3 sregs (2 for arg1 and 1 for arg2)
14424 * - make byref a 'normal' type.
14425 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14426 * variable if needed.
14427 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14428 * like inline_method.
14429 * - remove inlining restrictions
14430 * - fix LNEG and enable cfold of INEG
14431 * - generalize x86 optimizations like ldelema as a peephole optimization
14432 * - add store_mem_imm for amd64
14433 * - optimize the loading of the interruption flag in the managed->native wrappers
14434 * - avoid special handling of OP_NOP in passes
14435 * - move code inserting instructions into one function/macro.
14436 * - try a coalescing phase after liveness analysis
14437 * - add float -> vreg conversion + local optimizations on !x86
14438 * - figure out how to handle decomposed branches during optimizations, ie.
14439 * compare+branch, op_jump_table+op_br etc.
14440 * - promote RuntimeXHandles to vregs
14441 * - vtype cleanups:
14442 * - add a NEW_VARLOADA_VREG macro
14443 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14444 * accessing vtype fields.
14445 * - get rid of I8CONST on 64 bit platforms
14446 * - dealing with the increase in code size due to branches created during opcode
14448 * - use extended basic blocks
14449 * - all parts of the JIT
14450 * - handle_global_vregs () && local regalloc
14451 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14452 * - sources of increase in code size:
14455 * - isinst and castclass
14456 * - lvregs not allocated to global registers even if used multiple times
14457 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14459 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14460 * - add all micro optimizations from the old JIT
14461 * - put tree optimizations into the deadce pass
14462 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14463 * specific function.
14464 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14465 * fcompare + branchCC.
14466 * - create a helper function for allocating a stack slot, taking into account
14467 * MONO_CFG_HAS_SPILLUP.
14469 * - merge the ia64 switch changes.
14470 * - optimize mono_regstate2_alloc_int/float.
14471 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14472 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14473 * parts of the tree could be separated by other instructions, killing the tree
14474 * arguments, or stores killing loads etc. Also, should we fold loads into other
14475 * instructions if the result of the load is used multiple times ?
14476 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14477 * - LAST MERGE: 108395.
14478 * - when returning vtypes in registers, generate IR and append it to the end of the
14479 * last bb instead of doing it in the epilog.
14480 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14488 - When to decompose opcodes:
14489 - earlier: this makes some optimizations hard to implement, since the low level IR
14490 no longer contains the necessary information. But it is easier to do.
14491 - later: harder to implement, enables more optimizations.
14492 - Branches inside bblocks:
14493 - created when decomposing complex opcodes.
14494 - branches to another bblock: harmless, but not tracked by the branch
14495 optimizations, so need to branch to a label at the start of the bblock.
14496 - branches to inside the same bblock: very problematic, trips up the local
14497 reg allocator. Can be fixed by splitting the current bblock, but that is a
14498 complex operation, since some local vregs can become global vregs etc.
14499 - Local/global vregs:
14500 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14501 local register allocator.
14502 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14503 structure, created by mono_create_var (). Assigned to hregs or the stack by
14504 the global register allocator.
14505 - When to do optimizations like alu->alu_imm:
14506 - earlier -> saves work later on since the IR will be smaller/simpler
14507 - later -> can work on more instructions
14508 - Handling of valuetypes:
14509 - When a vtype is pushed on the stack, a new temporary is created, an
14510 instruction computing its address (LDADDR) is emitted and pushed on
14511 the stack. Need to optimize cases when the vtype is used immediately as in
14512 argument passing, stloc etc.
14513 - Instead of the to_end stuff in the old JIT, simply call the function handling
14514 the values on the stack before emitting the last instruction of the bb.
14517 #endif /* DISABLE_JIT */