2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Relative cost assigned to a branch by the inlining heuristics. */
73 #define BRANCH_COST 10
/* Upper bound on the size of a method eligible for inlining —
 * presumably measured in bytes of IL; TODO confirm against callers. */
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
/* Record an inlining failure and abort IR generation for the current method
 * (jumps to the local 'exception_exit' label at the expansion site). Only
 * fires when actually inlining (cfg->method != cfg->current_method) and the
 * inlinee is not a wrapper. */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
/* Abort IR generation if a compile-time exception was already recorded. */
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
/* Record a MethodAccessException-style failure and abort IR generation. */
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
/* Record a FieldAccessException-style failure and abort IR generation. */
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
/* Mark the method as not compilable under generic sharing and abort. */
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
/* As above, but only when compiling in gsharedvt mode. */
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
/* Flag an out-of-memory condition on the cfg and abort IR generation. */
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
/* Disable AOT compilation for this method; traces the call site when verbose. */
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
/* Raise a TypeLoadException-style compile failure; break_on_unverified ()
 * gives an attached debugger a hook before we abort. */
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
/* Like LOAD_ERROR, but also records the offending class in exception_ptr. */
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
/* Propagate a MonoError recorded on the cfg as a compile-time exception. */
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers defined later in this file. */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
/* Cached icall signatures for the runtime trampolines; created once in
 * mono_create_helper_signatures () below. */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
/* First expansion of mini-ops.h: one dest/src1/src2 register-kind character
 * per opcode (the trailing ' ' pads MINI_OP entries to MINI_OP3 width). */
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
/* Second expansion: count the source registers each opcode uses. */
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
/* Initialize liveness bookkeeping for a variable; 0xffff marks "no use seen yet". */
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the internal vreg allocators: each allocates a fresh
 * virtual register of the given kind (int/long/float/pointer) for 'cfg'. */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
/* Allocate a vreg whose kind is chosen from the eval-stack type. */
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/* Map a MonoType to the register-to-register move opcode used for values of
 * that type (OP_MOVE / OP_FMOVE / OP_RMOVE / ...). */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_get_underlying_type (cfg, type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values stay in dedicated single-precision registers. */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
/* Shared type variable: recurse on the representative underlying type. */
334 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and all of its
 * instructions to stdout, prefixed with 'msg'. */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/* Create the cached icall signatures used by the trampoline helpers declared
 * above. The descriptor strings are "<ret> <args...>" icall signatures. */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Debugger hook: called on every verification failure so a breakpoint can be
 * taken when the break-on-unverified debug option is enabled. */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/* Record a MethodAccessException on the cfg with a descriptive message.
 * The full-name strings are owned here and freed before returning. */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/* Record a FieldAccessException on the cfg with a descriptive message. */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/* Mark the compilation as a failed inline attempt; kept out of line so the
 * INLINE_FAILURE fast path stays small. */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/* Mark the compilation as failed for generic sharing.
 * NOTE(review): the trailing '\' below looks like a leftover macro line
 * continuation, and the message prints __LINE__ instead of the 'line'
 * parameter, so 'file'/'line' appear unused here — confirm upstream. */
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Same for gsharedvt: also stores a detailed message on the cfg. */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
/* Abort on unverifiable IL. Under gsharedvt we instead report a generic
 * sharing failure so the runtime falls back to compiling a concrete
 * instantiation (see the comment above). */
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * address 'ip'. Relies on 'header' and 'end' being in scope at the
 * expansion site; out-of-range addresses are treated as unverifiable. */
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated managed-pointer register. */
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
/* Mixed r4/r8 operands: promote the r4 side to r8. */
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
/* On 64-bit targets, sign-extend an I4 operand paired with a native-int one. */
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack entries, emit the type-specialized binary op, and push
 * the (possibly decomposed) result. Uses the implicit 'cfg', 'ins', 'sp'
 * and 'bblock' locals of the IR-generation loop. */
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
/* Same for a unary opcode: pop one entry, push the specialized result. */
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
/* Emit a compare + conditional branch pair, wiring up the true/false target
 * blocks and flushing the eval stack across the block boundary. */
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, bblock, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, bblock, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, bblock, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
/* Spill stack entries into interface variables before leaving the block. */ \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (bblock, cmp); \
559 MONO_ADD_INS (bblock, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
/* Verbose tracing of the new edge (entry/exit blocks have no cil_code). */
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* De-duplicate: bail if the edge already exists in from->out_bb. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
/* Grow the out-edge array by one via the compile mempool (never freed
 * individually; the mempool owns it). */
597 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Mirror the same de-dup + grow-by-one for the in-edge list of 'to'. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
/* Token layout: (clause index + 1) in the high bits, region kind ORed with
 * the clause flags in the low byte. */
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
/* Filter expressions live between filter_offset and handler_offset. */
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offsets inside the protected (try) range of the clause. */
666 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
667 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind 'type' whose protected range a
 * branch from 'ip' to 'target' would leave (e.g. finally blocks that must
 * run on a leave). Result is accumulated into a GList. */
674 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
676 MonoMethodHeader *header = cfg->header;
677 MonoExceptionClause *clause;
681 for (i = 0; i < header->num_clauses; ++i) {
682 clause = &header->clauses [i];
/* Source inside the clause, target outside: the branch exits it. */
683 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
684 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
685 if (clause->flags == type)
686 res = g_list_append (res, clause);
/* Get or lazily create the per-region stack-pointer variable, cached in
 * cfg->spvars keyed by region token. */
693 mono_create_spvar_for_region (MonoCompile *cfg, int region)
697 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
701 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
702 /* prevent it from being register allocated */
703 var->flags |= MONO_INST_VOLATILE;
705 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at 'offset', if any. */
709 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
711 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for a handler at
 * 'offset', cached in cfg->exvars. */
715 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
719 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
723 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
724 /* prevent it from being register allocated */
725 var->flags |= MONO_INST_VOLATILE;
727 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
733 * Returns the type used in the eval stack when @type is loaded.
734 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
737 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
741 type = mini_get_underlying_type (cfg, type);
742 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
744 inst->type = STACK_MP;
749 switch (type->type) {
751 inst->type = STACK_INV;
755 case MONO_TYPE_BOOLEAN:
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
/* R4 stack kind depends on whether the backend keeps r4 in its own regs. */
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
/* Shared generic parameter: value types stay vtypes under gsharedvt,
 * otherwise recurse on the representative underlying type. */
804 g_assert (cfg->generic_sharing_context);
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of numeric binops, indexed by [lhs stack type][rhs stack type].
 * NOTE(review): some rows carry an extra 9th entry — presumably the R4
 * column added later; row/column order should match the STACK_* enum. */
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, indexed by operand stack type. */
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor...). */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of operand-type pairs: 0 = invalid, non-zero values encode
 * which comparison kinds are permitted (see uses in type_from_op). */
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
/* Result type of shift ops, indexed by [value type][shift-count type]. */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to obtain the
 * type-specialized OP_* opcode, indexed by operand stack type. */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
/* Numeric binops: table lookup for the result type, then shift the
 * generic opcode to the type-specialized one via the op maps. */
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* Comparisons: choose the compare width from the first operand. */
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only comparisons: bit 0 of the table entry gates validity. */
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
/* 'not' is only defined on the integer stack types. */
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit and native-int results. */
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native unsigned int; opcode depends on source width. */
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floating point. */
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack carry object_class as a placeholder klass. */
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
/* Maps a type kind to its eval-stack type; indexing presumably follows
 * MONO_TYPE_* order — TODO confirm against the missing surrounding code. */
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the given argument instructions are compatible with the
 * parameter types of 'sig' (used when validating calls). */
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
/* Floating-point stack entries must match an R4/R8 non-byref parameter. */
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
/* Created lazily; subsequent calls return the cached variable. */
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT-address variable; only meaningful on targets that
 * define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. */
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable used under
 * generic sharing; kept on the stack so it survives register allocation. */
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/* Map an eval-stack entry back to a representative MonoType (used e.g. when
 * creating interface variables for stack spills). */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref ('this_arg') form of the klass. */
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to the corresponding evaluation-stack type (STACK_*), resolving enums first. */
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1289 t = mono_type_get_underlying_type (t);
1293 case MONO_TYPE_BOOLEAN:
1296 case MONO_TYPE_CHAR:
1303 case MONO_TYPE_FNPTR:
1305 case MONO_TYPE_CLASS:
1306 case MONO_TYPE_STRING:
1307 case MONO_TYPE_OBJECT:
1308 case MONO_TYPE_SZARRAY:
1309 case MONO_TYPE_ARRAY:
/* R4 may map to STACK_R4 or STACK_R8 depending on the backend's float ABI */
1315 return cfg->r4_stack_type;
1318 case MONO_TYPE_VALUETYPE:
1319 case MONO_TYPE_TYPEDBYREF:
1321 case MONO_TYPE_GENERICINST:
1322 if (mono_type_generic_inst_is_valuetype (t))
1328 g_assert_not_reached ();
/* Return the element class accessed by a given ldelem/stelem CIL opcode. */
1335 array_access_to_klass (int opcode)
1339 return mono_defaults.byte_class;
1341 return mono_defaults.uint16_class;
1344 return mono_defaults.int_class;
1347 return mono_defaults.sbyte_class;
1350 return mono_defaults.int16_class;
1353 return mono_defaults.int32_class;
1355 return mono_defaults.uint32_class;
1358 return mono_defaults.int64_class;
1361 return mono_defaults.single_class;
1364 return mono_defaults.double_class;
1365 case CEE_LDELEM_REF:
1366 case CEE_STELEM_REF:
1367 return mono_defaults.object_class;
1369 g_assert_not_reached ();
1375 * We try to share variables when possible
/*
 * Return the (possibly shared) temp variable used to carry stack slot 'slot'
 * of stack type ins->type across basic-block boundaries. Int-typed slots are
 * cached in cfg->intvars keyed by (type, slot) so the same variable is reused.
 */
1378 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1383 /* inlining can result in deeper stacks */
1384 if (slot >= cfg->header->max_stack)
1385 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1387 pos = ins->type - 1 + slot * STACK_MAX;
1389 switch (ins->type) {
1396 if ((vnum = cfg->intvars [pos]))
1397 return cfg->varinfo [vnum];
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1399 cfg->intvars [pos] = res->inst_c0;
1402 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so AOT can resolve the item later. */
1408 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1411 * Don't use this if a generic_context is set, since that means AOT can't
1412 * look up the method using just the image+token.
1413 * table == 0 means this is a reference made from a wrapper.
1415 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1416 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1417 jump_info_token->image = image;
1418 jump_info_token->token = token;
1419 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1424 * This function is called to handle items that are left on the evaluation stack
1425 * at basic block boundaries. What happens is that we save the values to local variables
1426 * and we reload them later when first entering the target basic block (with the
1427 * handle_loaded_temps () function).
1428 * A single join point will use the same variables (stored in the array bb->out_stack or
1429 * bb->in_stack, if the basic block is before or after the join point).
1431 * This function needs to be called _before_ emitting the last instruction of
1432 * the bb (i.e. before emitting a branch).
1433 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * Spill the 'count' values in sp [] to temp variables shared with the
 * successor bblocks, so the evaluation stack survives across bblock
 * boundaries. Sets cfg->unverifiable when in/out stack depths disagree
 * at a join point.
 */
1436 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1439 MonoBasicBlock *bb = cfg->cbb;
1440 MonoBasicBlock *outb;
1441 MonoInst *inst, **locals;
1446 if (cfg->verbose_level > 3)
1447 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1448 if (!bb->out_scount) {
1449 bb->out_scount = count;
1450 //printf ("bblock %d has out:", bb->block_num);
/* Reuse an in_stack already assigned to a successor, if any */
1452 for (i = 0; i < bb->out_count; ++i) {
1453 outb = bb->out_bb [i];
1454 /* exception handlers are linked, but they should not be considered for stack args */
1455 if (outb->flags & BB_EXCEPTION_HANDLER)
1457 //printf (" %d", outb->block_num);
1458 if (outb->in_stack) {
1460 bb->out_stack = outb->in_stack;
1466 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1467 for (i = 0; i < count; ++i) {
1469 * try to reuse temps already allocated for this purpose, if they occupy the same
1470 * stack slot and if they are of the same type.
1471 * This won't cause conflicts since if 'local' is used to
1472 * store one of the values in the in_stack of a bblock, then
1473 * the same variable will be used for the same outgoing stack
1475 * This doesn't work when inlining methods, since the bblocks
1476 * in the inlined methods do not inherit their in_stack from
1477 * the bblock they are inlined to. See bug #58863 for an
1480 if (cfg->inlined_method)
1481 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1483 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that don't have an in_stack yet */
1488 for (i = 0; i < bb->out_count; ++i) {
1489 outb = bb->out_bb [i];
1490 /* exception handlers are linked, but they should not be considered for stack args */
1491 if (outb->flags & BB_EXCEPTION_HANDLER)
1493 if (outb->in_scount) {
1494 if (outb->in_scount != bb->out_scount) {
1495 cfg->unverifiable = TRUE;
1498 continue; /* check they are the same locals */
1500 outb->in_scount = count;
1501 outb->in_stack = bb->out_stack;
1504 locals = bb->out_stack;
/* Store each stack value into its temp and replace it on the stack */
1506 for (i = 0; i < count; ++i) {
1507 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1508 inst->cil_code = sp [i]->cil_code;
1509 sp [i] = locals [i];
1510 if (cfg->verbose_level > 3)
1511 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1515 * It is possible that the out bblocks already have in_stack assigned, and
1516 * the in_stacks differ. In this case, we will store to all the different
1523 /* Find a bblock which has a different in_stack */
1525 while (bindex < bb->out_count) {
1526 outb = bb->out_bb [bindex];
1527 /* exception handlers are linked, but they should not be considered for stack args */
1528 if (outb->flags & BB_EXCEPTION_HANDLER) {
1532 if (outb->in_stack != locals) {
1533 for (i = 0; i < count; ++i) {
1534 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1535 inst->cil_code = sp [i]->cil_code;
1536 sp [i] = locals [i];
1537 if (cfg->verbose_level > 3)
1538 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1540 locals = outb->in_stack;
1549 /* Emit code which loads interface_offsets [klass->interface_id]
1550 * The array is stored in memory before vtable.
1553 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1555 if (cfg->compile_aot) {
/* AOT can't bake klass->interface_id in, so load it via a patched constant */
1556 int ioffset_reg = alloc_preg (cfg);
1557 int iid_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the offsets array sits at negative offsets before the vtable */
1564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit IR that sets intf_bit_reg to a nonzero value iff bit klass->interface_id
 * is set in the interface bitmap located at [base_reg + offset]. With
 * COMPRESSED_INTERFACE_BITMAP the test is done via an icall; otherwise the
 * relevant bitmap byte is loaded and masked inline.
 */
1569 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1571 int ibitmap_reg = alloc_preg (cfg);
1572 #ifdef COMPRESSED_INTERFACE_BITMAP
1574 MonoInst *res, *ins;
1575 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1576 MONO_ADD_INS (cfg->cbb, ins);
1578 if (cfg->compile_aot)
1579 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1581 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1582 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1585 int ibitmap_byte_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1589 if (cfg->compile_aot) {
/* AOT: the interface id is a patched constant, so compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR */
1590 int iid_reg = alloc_preg (cfg);
1591 int shifted_iid_reg = alloc_preg (cfg);
1592 int ibitmap_byte_address_reg = alloc_preg (cfg);
1593 int masked_iid_reg = alloc_preg (cfg);
1594 int iid_one_bit_reg = alloc_preg (cfg);
1595 int iid_bit_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1601 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1602 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the id is known at compile time, so fold index and mask into immediates */
1605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1612 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1613 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass* in klass_reg (uses MonoClass.interface_bitmap). */
1616 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1618 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1622 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1623 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable* in vtable_reg (uses MonoVTable.interface_bitmap). */
1626 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1628 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1632 * Emit code which checks whether the interface id of @klass is smaller
1633 * than the value given by max_iid_reg.
/*
 * Compare klass->interface_id against max_iid_reg; on failure either branch
 * to false_target (when given) or throw InvalidCastException.
 */
1636 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1637 MonoBasicBlock *false_target)
1639 if (cfg->compile_aot) {
1640 int iid_reg = alloc_preg (cfg);
1641 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1649 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1652 /* Same as above, but obtains max_iid from a vtable */
1654 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1655 MonoBasicBlock *false_target)
1657 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id comes from the vtable */
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1660 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1663 /* Same as above, but obtains max_iid from a klass */
1665 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1666 MonoBasicBlock *false_target)
1668 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id comes from the MonoClass */
1670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1671 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subclass test: check whether the class in klass_reg
 * derives from 'klass' by probing the supertypes array at klass->idepth - 1.
 * Branches to true_target on match; uses klass_ins (when non-NULL) or a
 * class constant for the comparison.
 */
1675 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1677 int idepth_reg = alloc_preg (cfg);
1678 int stypes_reg = alloc_preg (cfg);
1679 int stype = alloc_preg (cfg);
1681 mono_class_setup_supertypes (klass);
/* Only check idepth when the supertable may be truncated */
1683 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1692 } else if (cfg->compile_aot) {
1693 int const_reg = alloc_preg (cfg);
1694 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1695 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst subclass test without an explicit class instruction. */
1703 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1705 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Interface cast test against a vtable: range-check the interface id, then
 * test the interface bitmap. Branches to true_target on success when given,
 * otherwise throws InvalidCastException on failure.
 */
1709 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1711 int intf_reg = alloc_preg (cfg);
1713 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1714 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1723 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface cast test as mini_emit_iface_cast, but klass_reg holds a MonoClass*, not a vtable. */
1726 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1728 int intf_bit_reg = alloc_preg (cfg);
1730 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1731 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1736 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class-equality check: throw InvalidCastException unless the
 * class in klass_reg equals 'klass' (compared via klass_inst, an AOT class
 * constant, or an immediate, in that order of preference).
 */
1740 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1743 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1744 } else if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit class instruction. */
1755 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1757 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare the class in klass_reg with 'klass' and branch to 'target' using 'branch_op'. */
1761 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1763 if (cfg->compile_aot) {
1764 int const_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1774 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check for the class in klass_reg against 'klass', throwing
 * InvalidCastException on failure. Array casts compare rank and then recurse
 * on the element (cast_class) type; non-array casts walk the supertypes table.
 * obj_reg == -1 skips the vector (bounds == NULL) check; object_is_null is the
 * bblock reached when the object is known to be null.
 */
1777 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1780 int rank_reg = alloc_preg (cfg);
1781 int eclass_reg = alloc_preg (cfg);
1783 g_assert (!klass_inst);
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1787 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* object[]: accept any reference element, plus enum element handled specially */
1789 if (klass->cast_class == mono_defaults.object_class) {
1790 int parent_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1792 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1795 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 } else if (klass->cast_class == mono_defaults.enum_class) {
1798 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1799 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1800 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1802 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1803 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1806 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1807 /* Check that the object is a vector too */
1808 int bounds_reg = alloc_preg (cfg);
1809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1811 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: probe the supertypes table at klass->idepth - 1 */
1814 int idepth_reg = alloc_preg (cfg);
1815 int stypes_reg = alloc_preg (cfg);
1816 int stype = alloc_preg (cfg);
1818 mono_class_setup_supertypes (klass);
1820 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1823 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1827 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit class instruction. */
1832 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1834 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit IR that zeroes 'size' bytes at [destreg + offset]. Only val == 0 is
 * supported. Small aligned sizes become a single immediate store; otherwise
 * the region is filled with register stores, widest first when unaligned
 * access is permitted.
 */
1838 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1842 g_assert (val == 0);
1847 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1856 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1858 #if SIZEOF_REGISTER == 8
1860 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register and store repeatedly */
1866 val_reg = alloc_preg (cfg);
1868 if (SIZEOF_REGISTER == 8)
1869 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1871 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1874 /* This could be optimized further if necessary */
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1883 #if !NO_UNALIGNED_ACCESS
1884 if (SIZEOF_REGISTER == 8) {
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit an inline, fully unrolled copy of 'size' bytes from
 * [srcreg + soffset] to [destreg + doffset], using the widest load/store
 * pairs the alignment (and NO_UNALIGNED_ACCESS) allows.
 */
1916 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1923 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1924 g_assert (size < 10000);
1927 /* This could be optimized further if necessary */
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1938 #if !NO_UNALIGNED_ACCESS
1939 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks first on 64-bit */
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* then 4-, 2- and 1-byte tails */
1952 cur_reg = alloc_preg (cfg);
1953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1960 cur_reg = alloc_preg (cfg);
1961 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1968 cur_reg = alloc_preg (cfg);
1969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Emit IR that stores sreg1 into the TLS slot identified by tls_key.
 * Under AOT the slot offset is a patched constant (OP_TLS_SET_REG);
 * otherwise it is resolved now via mini_get_tls_offset ().
 */
1978 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1982 if (cfg->compile_aot) {
1983 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1984 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1986 ins->sreg2 = c->dreg;
1987 MONO_ADD_INS (cfg->cbb, ins);
1989 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1991 ins->inst_offset = mini_get_tls_offset (tls_key);
1992 MONO_ADD_INS (cfg->cbb, ins);
1999 * Emit IR to push the current LMF onto the LMF stack.
/*
 * Emit IR that links cfg->lmf_var onto the thread's LMF (Last Managed Frame)
 * stack on method entry. The lmf_addr is obtained from TLS via an intrinsic
 * when available, an inlined pthread_getspecific () on some targets, or the
 * mono_get_lmf_addr icall as fallback; it is cached in cfg->lmf_addr_var.
 */
2002 emit_push_lmf (MonoCompile *cfg)
2005 * Emit IR to push the LMF:
2006 * lmf_addr = <lmf_addr from tls>
2007 * lmf->lmf_addr = lmf_addr
2008 * lmf->prev_lmf = *lmf_addr
2011 int lmf_reg, prev_lmf_reg;
2012 MonoInst *ins, *lmf_ins;
2017 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2018 /* Load current lmf */
2019 lmf_ins = mono_get_lmf_intrinsic (cfg);
2021 MONO_ADD_INS (cfg->cbb, lmf_ins);
2022 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2023 lmf_reg = ins->dreg;
2024 /* Save previous_lmf */
2025 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2027 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2030 * Store lmf_addr in a variable, so it can be allocated to a global register.
2032 if (!cfg->lmf_addr_var)
2033 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2036 ins = mono_get_jit_tls_intrinsic (cfg);
2038 int jit_tls_dreg = ins->dreg;
2040 MONO_ADD_INS (cfg->cbb, ins);
2041 lmf_reg = alloc_preg (cfg);
2042 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2049 MONO_ADD_INS (cfg->cbb, lmf_ins);
2052 MonoInst *args [16], *jit_tls_ins, *ins;
2054 /* Inline mono_get_lmf_addr () */
2055 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2057 /* Load mono_jit_tls_id */
2058 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2059 /* call pthread_getspecific () */
2060 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2061 /* lmf_addr = &jit_tls->lmf */
2062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2065 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2069 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2071 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2072 lmf_reg = ins->dreg;
2074 prev_lmf_reg = alloc_preg (cfg);
2075 /* Save previous_lmf */
2076 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = lmf */
2079 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2086 * Emit IR to pop the current LMF from the LMF stack.
/*
 * Emit IR that unlinks cfg->lmf_var from the thread's LMF stack on method
 * exit, restoring the previous_lmf either directly into TLS or through the
 * cached lmf_addr variable.
 */
2089 emit_pop_lmf (MonoCompile *cfg)
2091 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2097 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2098 lmf_reg = ins->dreg;
2100 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2101 /* Load previous_lmf */
2102 prev_lmf_reg = alloc_preg (cfg);
2103 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2105 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2108 * Emit IR to pop the LMF:
2109 * *(lmf->lmf_addr) = lmf->prev_lmf
2111 /* This could be called before emit_push_lmf () */
2112 if (!cfg->lmf_addr_var)
2113 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2114 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2116 prev_lmf_reg = alloc_preg (cfg);
2117 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2118 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * Emit a profiler enter/leave icall to 'func' with the current method as the
 * only argument, when enter/leave profiling is enabled.
 */
2123 emit_instrumentation_call (MonoCompile *cfg, void *func)
2125 MonoInst *iargs [1];
2128 * Avoid instrumenting inlined methods since it can
2129 * distort profiling results.
2131 if (cfg->method != cfg->current_method)
2134 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2135 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2136 mono_emit_jit_icall (cfg, func, iargs);
/*
 * Select the call IR opcode for a call returning 'type'. 'calli' picks the
 * indirect (_REG) form, 'virt' the vtable-based (_MEMBASE) form; otherwise
 * the plain direct form is used. Enums and generic instances are reduced to
 * their underlying type first.
 */
2141 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2144 type = mini_get_underlying_type (cfg, type);
2145 switch (type->type) {
2146 case MONO_TYPE_VOID:
2147 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2150 case MONO_TYPE_BOOLEAN:
2153 case MONO_TYPE_CHAR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 case MONO_TYPE_FNPTR:
2161 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2162 case MONO_TYPE_CLASS:
2163 case MONO_TYPE_STRING:
2164 case MONO_TYPE_OBJECT:
2165 case MONO_TYPE_SZARRAY:
2166 case MONO_TYPE_ARRAY:
2167 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2170 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2173 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2175 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2177 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2178 case MONO_TYPE_VALUETYPE:
2179 if (type->data.klass->enumtype) {
2180 type = mono_class_enum_basetype (type->data.klass);
2183 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 case MONO_TYPE_TYPEDBYREF:
2185 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2186 case MONO_TYPE_GENERICINST:
2187 type = &type->data.generic_class->container_class->byval_arg;
2190 case MONO_TYPE_MVAR:
/* gsharedvt type variables are returned by reference, so treat as vtype call */
2192 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2194 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2200 * target_type_is_incompatible:
2201 * @cfg: MonoCompile context
2203 * Check that the item @arg on the evaluation stack can be stored
2204 * in the target type (can be a local, or field, etc).
2205 * The cfg arg can be used to check if we need verification or just
2208 * Returns: non-0 value if arg can't be stored on a target.
/*
 * Return a non-zero value if the stack item 'arg' cannot be stored into a
 * location of type 'target' (see the function comment above this block for
 * the full contract).
 */
2211 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2213 MonoType *simple_type;
2216 if (target->byref) {
2217 /* FIXME: check that the pointed to types match */
2218 if (arg->type == STACK_MP)
2219 return arg->klass != mono_class_from_mono_type (target);
2220 if (arg->type == STACK_PTR)
2225 simple_type = mini_get_underlying_type (cfg, target);
2226 switch (simple_type->type) {
2227 case MONO_TYPE_VOID:
2231 case MONO_TYPE_BOOLEAN:
2234 case MONO_TYPE_CHAR:
2237 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2241 /* STACK_MP is needed when setting pinned locals */
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_FNPTR:
2249 * Some opcodes like ldloca return 'transient pointers' which can be stored
2250 * in native int. (#688008).
2252 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2255 case MONO_TYPE_CLASS:
2256 case MONO_TYPE_STRING:
2257 case MONO_TYPE_OBJECT:
2258 case MONO_TYPE_SZARRAY:
2259 case MONO_TYPE_ARRAY:
2260 if (arg->type != STACK_OBJ)
2262 /* FIXME: check type compatibility */
2266 if (arg->type != STACK_I8)
2270 if (arg->type != cfg->r4_stack_type)
2274 if (arg->type != STACK_R8)
2277 case MONO_TYPE_VALUETYPE:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_TYPEDBYREF:
2285 if (arg->type != STACK_VTYPE)
2287 klass = mono_class_from_mono_type (simple_type);
2288 if (klass != arg->klass)
2291 case MONO_TYPE_GENERICINST:
2292 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2293 if (arg->type != STACK_VTYPE)
2295 klass = mono_class_from_mono_type (simple_type);
2296 if (klass != arg->klass)
2300 if (arg->type != STACK_OBJ)
2302 /* FIXME: check type compatibility */
2306 case MONO_TYPE_MVAR:
/* shared generic type variables: vtype or reference depending on gsharedvt */
2307 g_assert (cfg->generic_sharing_context);
2308 if (mini_type_var_is_vt (cfg, simple_type)) {
2309 if (arg->type != STACK_VTYPE)
2312 if (arg->type != STACK_OBJ)
2317 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2323 * Prepare arguments for passing to a function call.
2324 * Return a non-zero value if the arguments can't be passed to the given
2326 * The type checks are not yet complete and some conversions may need
2327 * casts on 32 or 64 bit architectures.
2329 * FIXME: implement this using target_type_is_incompatible ()
/*
 * Return a non-zero value if the stack items in args [] cannot be passed to a
 * call with signature 'sig' (stack-type check per parameter; see the comment
 * block above this function).
 */
2332 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2334 MonoType *simple_type;
/* implicit 'this' argument must be a reference or pointer */
2338 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2342 for (i = 0; i < sig->param_count; ++i) {
2343 if (sig->params [i]->byref) {
2344 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2348 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2350 switch (simple_type->type) {
2351 case MONO_TYPE_VOID:
2356 case MONO_TYPE_BOOLEAN:
2359 case MONO_TYPE_CHAR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2368 case MONO_TYPE_FNPTR:
2369 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2372 case MONO_TYPE_CLASS:
2373 case MONO_TYPE_STRING:
2374 case MONO_TYPE_OBJECT:
2375 case MONO_TYPE_SZARRAY:
2376 case MONO_TYPE_ARRAY:
2377 if (args [i]->type != STACK_OBJ)
2382 if (args [i]->type != STACK_I8)
2386 if (args [i]->type != cfg->r4_stack_type)
2390 if (args [i]->type != STACK_R8)
2393 case MONO_TYPE_VALUETYPE:
2394 if (simple_type->data.klass->enumtype) {
2395 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2398 if (args [i]->type != STACK_VTYPE)
2401 case MONO_TYPE_TYPEDBYREF:
2402 if (args [i]->type != STACK_VTYPE)
2405 case MONO_TYPE_GENERICINST:
2406 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2409 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed by value as vtypes */
2411 if (args [i]->type != STACK_VTYPE)
2415 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a virtual-call (_MEMBASE) opcode to the corresponding direct-call opcode. */
2423 callvirt_to_call (int opcode)
2426 case OP_CALL_MEMBASE:
2428 case OP_VOIDCALL_MEMBASE:
2430 case OP_FCALL_MEMBASE:
2432 case OP_RCALL_MEMBASE:
2434 case OP_VCALL_MEMBASE:
2436 case OP_LCALL_MEMBASE:
2439 g_assert_not_reached ();
2445 /* Either METHOD or IMT_ARG needs to be set */
/*
 * Materialize the IMT argument (either the existing imt_arg instruction or a
 * constant for 'method') into a register and attach it to 'call', using
 * MONO_ARCH_IMT_REG when the architecture defines a dedicated IMT register.
 * The LLVM path records the register in call->imt_arg_reg instead.
 */
2447 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2451 if (COMPILE_LLVM (cfg)) {
2452 method_reg = alloc_preg (cfg);
2455 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2456 } else if (cfg->compile_aot) {
2457 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2460 MONO_INST_NEW (cfg, ins, OP_PCONST);
2461 ins->inst_p0 = method;
2462 ins->dreg = method_reg;
2463 MONO_ADD_INS (cfg->cbb, ins);
2467 call->imt_arg_reg = method_reg;
2469 #ifdef MONO_ARCH_IMT_REG
2470 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2472 /* Need this to keep the IMT arg alive */
2473 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path */
2478 #ifdef MONO_ARCH_IMT_REG
2479 method_reg = alloc_preg (cfg);
2482 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2483 } else if (cfg->compile_aot) {
2484 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2487 MONO_INST_NEW (cfg, ins, OP_PCONST);
2488 ins->inst_p0 = method;
2489 ins->dreg = method_reg;
2490 MONO_ADD_INS (cfg->cbb, ins);
2493 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Arch-specific fallback when no dedicated IMT register exists */
2495 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP with
 * the given TARGET.  NOTE(review): the assignments of ji->ip/ji->type are
 * elided in this excerpt; only data.target is visible here.
 */
2499 static MonoJumpInfo *
2500 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2502 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2506 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 * Return the generic-context usage flags for KLASS when generic sharing is
 * active for this compile; presumably returns 0 otherwise (the fallback
 * return is elided here).
 */
2512 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2514 if (cfg->generic_sharing_context)
2515 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 * Method counterpart of mini_class_check_context_used (): only consult the
 * generic context when this compile uses generic sharing.
 */
2521 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2523 if (cfg->generic_sharing_context)
2524 return mono_method_check_context_used (method);
2530 * check_method_sharing:
2532 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs: *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX (either may be NULL).
 * A vtable is needed for static/valuetype methods of generic classes whose
 * class context is sharable; an mrgctx is needed when the method itself has
 * a method_inst (generic method arguments) or the signature is gsharedvt.
 */
2535 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2537 gboolean pass_vtable = FALSE;
2538 gboolean pass_mrgctx = FALSE;
2540 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2541 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2542 gboolean sharable = FALSE;
2544 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2547 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2548 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2549 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2551 sharable = sharing_enabled && context_sharable;
2555 * Pass vtable iff target method might
2556 * be shared, which means that sharing
2557 * is enabled for its class and its
2558 * context is sharable (and it's not a
2561 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2565 if (mini_method_get_context (cmethod) &&
2566 mini_method_get_context (cmethod)->method_inst) {
/* Generic methods get an mrgctx instead of a vtable, never both. */
2567 g_assert (!pass_vtable);
2569 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2572 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2573 MonoGenericContext *context = mini_method_get_context (cmethod);
2574 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2576 if (sharing_enabled && context_sharable)
2578 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2583 if (out_pass_vtable)
2584 *out_pass_vtable = pass_vtable;
2585 if (out_pass_mrgctx)
2586 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Build (but do not yet insert) a MonoCallInst for a call with signature SIG
 * and argument instructions ARGS.  CALLI selects an indirect call, VIRTUAL a
 * vtable call, TAIL a tail call, RGCTX whether an rgctx argument is passed,
 * and UNBOX_TRAMPOLINE whether the target needs an unbox trampoline.
 * Valuetype returns are routed through either cfg->vret_addr or a temporary
 * whose address is passed via OP_OUTARG_VTRETADDR.
 */
2589 inline static MonoCallInst *
2590 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2591 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2595 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the frame, so the leave-method profiler event fires here. */
2600 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2602 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2604 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2607 call->signature = sig;
2608 call->rgctx_reg = rgctx;
2609 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2611 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): both vtype branches below test mini_type_is_vtype; the outer
 * condition distinguishing them (presumably the backend's vret-in-register
 * support) is elided in this excerpt — check the full file. */
2614 if (mini_type_is_vtype (cfg, sig_ret)) {
2615 call->vret_var = cfg->vret_addr;
2616 //g_assert_not_reached ();
2618 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2619 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2622 temp->backend.is_pinvoke = sig->pinvoke;
2625 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2626 * address of return value to increase optimization opportunities.
2627 * Before vtype decomposition, the dreg of the call ins itself represents the
2628 * fact the call modifies the return value. After decomposition, the call will
2629 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2630 * will be transformed into an LDADDR.
2632 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2633 loada->dreg = alloc_preg (cfg);
2634 loada->inst_p0 = temp;
2635 /* We reference the call too since call->dreg could change during optimization */
2636 loada->inst_p1 = call;
2637 MONO_ADD_INS (cfg->cbb, loada);
2639 call->inst.dreg = temp->dreg;
2641 call->vret_var = loada;
2642 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2643 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2645 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2646 if (COMPILE_SOFT_FLOAT (cfg)) {
2648 * If the call has a float argument, we would need to do an r8->r4 conversion using
2649 * an icall, but that cannot be done during the call sequence since it would clobber
2650 * the call registers + the stack. So we do it before emitting the call.
2652 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2654 MonoInst *in = call->args [i];
2656 if (i >= sig->hasthis)
2657 t = sig->params [i - sig->hasthis];
2659 t = &mono_defaults.int_class->byval_arg;
2660 t = mono_type_get_underlying_type (t);
2662 if (!t->byref && t->type == MONO_TYPE_R4) {
2663 MonoInst *iargs [1];
2667 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2669 /* The result will be in an int vreg */
2670 call->args [i] = conv;
2676 call->need_unbox_trampoline = unbox_trampoline;
2679 if (COMPILE_LLVM (cfg))
2680 mono_llvm_emit_call (cfg, call);
2682 mono_arch_emit_call (cfg, call);
2684 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call in the method. */
2687 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2688 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx argument in RGCTX_REG to CALL.  On architectures with a
 * dedicated MONO_ARCH_RGCTX_REG it is bound as an out-argument register;
 * otherwise only call->rgctx_arg_reg is recorded for the backend.
 */
2694 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2696 #ifdef MONO_ARCH_RGCTX_REG
2697 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2698 cfg->uses_rgctx_reg = TRUE;
2699 call->rgctx_reg = TRUE;
2701 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG and arguments ARGS,
 * optionally passing an IMT argument and/or an rgctx argument.  When pinvoke
 * callconv checking is enabled for a managed-to-native wrapper, the stack
 * pointer is saved before the call and compared afterwards, throwing
 * ExecutionEngineException on imbalance (cdecl vs stdcall mismatches).
 */
2708 inline static MonoInst*
2709 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2714 gboolean check_sp = FALSE;
2716 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2717 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2719 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh reg so it survives until the call. */
2724 rgctx_reg = mono_alloc_preg (cfg);
2725 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2729 if (!cfg->stack_inbalance_var)
2730 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call. */
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2733 ins->dreg = cfg->stack_inbalance_var->dreg;
2734 MONO_ADD_INS (cfg->cbb, ins);
2737 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2739 call->inst.sreg1 = addr->dreg;
2742 emit_imt_argument (cfg, call, NULL, imt_arg);
2744 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Snapshot SP after the call and compare with the saved value. */
2749 sp_reg = mono_alloc_preg (cfg);
2751 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2753 MONO_ADD_INS (cfg->cbb, ins);
2755 /* Restore the stack so we don't crash when throwing the exception */
2756 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2757 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2758 MONO_ADD_INS (cfg->cbb, ins);
2760 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2761 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2765 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2767 return (MonoInst*)call;
2771 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2774 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2776 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a call to METHOD with signature SIG and arguments ARGS.  THIS being
 * non-NULL selects a virtual call; IMT_ARG/RGCTX_ARG are optional extra
 * arguments.  Handles remoting (transparent-proxy) targets, delegate Invoke
 * fast paths, devirtualization of final/non-virtual methods, and interface
 * (IMT), generic-virtual and plain vtable dispatch.
 */
2779 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2780 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2782 #ifndef DISABLE_REMOTING
2783 gboolean might_be_remote = FALSE;
2785 gboolean virtual = this != NULL;
2786 gboolean enable_for_aot = TRUE;
2790 gboolean need_unbox_trampoline;
2793 sig = mono_method_signature (method);
2796 rgctx_reg = mono_alloc_preg (cfg);
2797 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2800 if (method->string_ctor) {
2801 /* Create the real signature */
2802 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2803 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2804 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2809 context_used = mini_method_check_context_used (cfg, method);
2811 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) 'this' may hit a proxy. */
2812 might_be_remote = this && sig->hasthis &&
2813 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2814 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2816 if (might_be_remote && context_used) {
2819 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting-check wrapper address from the rgctx. */
2821 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2823 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2827 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2829 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2831 #ifndef DISABLE_REMOTING
2832 if (might_be_remote)
2833 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2836 call->method = method;
2837 call->inst.flags |= MONO_INST_HAS_METHOD;
2838 call->inst.inst_left = this;
2839 call->tail_call = tail;
2842 int vtable_reg, slot_reg, this_reg;
2845 this_reg = this->dreg;
/* Fast path: multicast-delegate Invoke goes through delegate->invoke_impl. */
2847 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2848 MonoInst *dummy_use;
2850 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2852 /* Make a call to delegate->invoke_impl */
2853 call->inst.inst_basereg = this_reg;
2854 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2855 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2857 /* We must emit a dummy use here because the delegate trampoline will
2858 replace the 'this' argument with the delegate target making this activation
2859 no longer a root for the delegate.
2860 This is an issue for delegates that target collectible code such as dynamic
2861 methods of GC'able assemblies.
2863 For a test case look into #667921.
2865 FIXME: a dummy use is not the best way to do it as the local register allocator
2866 will put it on a caller save register and spil it around the call.
2867 Ideally, we would either put it on a callee save register or only do the store part.
2869 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2871 return (MonoInst*)call;
/* Devirtualize: non-virtual or final methods can be called directly. */
2874 if ((!cfg->compile_aot || enable_for_aot) &&
2875 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2876 (MONO_METHOD_IS_FINAL (method) &&
2877 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2878 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2880 * the method is not virtual, we just need to ensure this is not null
2881 * and then we can call the method directly.
2883 #ifndef DISABLE_REMOTING
2884 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2886 * The check above ensures method is not gshared, this is needed since
2887 * gshared methods can't have wrappers.
2889 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2893 if (!method->string_ctor)
2894 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2896 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2897 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2899 * the method is virtual, but we can statically dispatch since either
2900 * it's class or the method itself are sealed.
2901 * But first we need to ensure it's not a null reference.
2903 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2905 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'). */
2907 vtable_reg = alloc_preg (cfg);
2908 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2909 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: index into the IMT, which sits below the vtable. */
2912 guint32 imt_slot = mono_method_get_imt_slot (method);
2913 emit_imt_argument (cfg, call, call->method, imt_arg);
2914 slot_reg = vtable_reg;
2915 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2917 if (slot_reg == -1) {
2918 slot_reg = alloc_preg (cfg);
2919 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2920 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: plain vtable slot. */
2923 slot_reg = vtable_reg;
2924 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2925 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* NOTE(review): this branch appears to be the generic-virtual case — the
 * guarding condition is elided in this excerpt. */
2927 g_assert (mono_method_signature (method)->generic_param_count);
2928 emit_imt_argument (cfg, call, call->method, imt_arg);
2932 call->inst.sreg1 = slot_reg;
2933 call->inst.inst_offset = offset;
2934 call->virtual = TRUE;
2938 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2941 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2943 return (MonoInst*)call;
/* Convenience wrapper: non-tail call with the method's own signature and no
 * IMT or rgctx argument. */
2947 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2949 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * (Assignment of the target address to the call ins is elided here.)
 */
2953 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2960 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2963 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2965 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall identified by its C function
 * address FUNC, going through the icall's wrapper.
 */
2969 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2971 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2975 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2979 * mono_emit_abs_call:
2981 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is passed as the call address; the PATCH_INFO_ABS
 * machinery resolves it to the real target at code-emission time.  The ji is
 * registered in cfg->abs_patches so the resolver can find it. */
2983 inline static MonoInst*
2984 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2985 MonoMethodSignature *sig, MonoInst **args)
2987 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2991 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2994 if (cfg->abs_patches == NULL)
2995 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2996 g_hash_table_insert (cfg->abs_patches, ji, ji);
2997 ins = mono_emit_native_call (cfg, ji, sig, args);
2998 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_jit_icall_by_info:
 *
 * Emit a call to the JIT icall INFO.  If the icall cannot raise (and we are
 * not AOT/LLVM-on-64bit/soft-debugging) the wrapper is skipped; otherwise the
 * icall wrapper is created lazily and inlined at the call site.
 */
3003 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3005 gboolean no_wrapper = FALSE;
3008 * Call the jit icall without a wrapper if possible.
3009 * The wrapper is needed for the following reasons:
3010 * - to handle exceptions thrown using mono_raise_exceptions () from the
3011 * icall function. The EH code needs the lmf frame pushed by the
3012 * wrapper to be able to unwind back to managed code.
3013 * - to be able to do stack walks for asynchronously suspended
3014 * threads when debugging.
3016 if (info->no_raise) {
3017 if (cfg->compile_aot) {
3018 // FIXME: This might be loaded into a runtime during debugging
3019 // even if it is not compiled using 'soft-debug'.
3022 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3023 if ((cfg->compile_llvm && SIZEOF_VOID_P == 8) || cfg->gen_seq_points_debug_data)
/* Create the wrapper once; the memory barrier publishes it to other threads
 * before info->wrapper_method becomes visible. */
3032 if (!info->wrapper_method) {
3033 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3034 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3036 mono_memory_barrier ();
3040 * Inline the wrapper method, which is basically a call to the C icall, and
3041 * an exception check.
3043 costs = inline_method (cfg, info->wrapper_method, NULL,
3044 args, NULL, cfg->real_offset, TRUE, out_cbb);
3045 g_assert (costs > 0);
3046 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3050 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 * Sign/zero-extend a small-integer call result INS when the callee (pinvoke,
 * or anything under LLVM) may leave the upper bits of the register
 * uninitialized.  Returns the (possibly replaced) result instruction.
 */
3055 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3057 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3058 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3062 * Native code might return non register sized integers
3063 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load width. */
3065 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3066 case OP_LOADI1_MEMBASE:
3067 widen_op = OP_ICONV_TO_I1;
3069 case OP_LOADU1_MEMBASE:
3070 widen_op = OP_ICONV_TO_U1;
3072 case OP_LOADI2_MEMBASE:
3073 widen_op = OP_ICONV_TO_I2;
3075 case OP_LOADU2_MEMBASE:
3076 widen_op = OP_ICONV_TO_U2;
3082 if (widen_op != -1) {
3083 int dreg = alloc_preg (cfg);
3086 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3087 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (lazily caching) the managed String.memcpy(3) helper from corlib;
 * aborts if the corlib is too old to provide it.
 */
3097 get_memcpy_method (void)
3099 static MonoMethod *memcpy_method = NULL;
3100 if (!memcpy_method) {
3101 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3103 g_error ("Old corlib found. Install a new one");
3105 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively fill *WB_BITMAP with one bit per pointer-sized slot of KLASS
 * (at byte OFFSET from the start) that holds a GC reference, so the caller
 * can emit write barriers only for those slots.  Static fields are skipped;
 * nested valuetypes with references recurse with an adjusted offset.
 */
3109 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3111 MonoClassField *field;
3112 gpointer iter = NULL;
3114 while ((field = mono_class_get_fields (klass, &iter))) {
3117 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
3119 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3120 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3121 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3122 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3124 MonoClass *field_class = mono_class_from_mono_type (field->type);
3125 if (field_class->has_references)
3126 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for storing VALUE through PTR.  Prefers, in order:
 * an arch-specific OP_CARD_TABLE_WBARRIER, an inline card-table mark, and
 * finally a call to the GC's write-barrier method.  No-op unless
 * cfg->gen_write_barriers is set.
 */
3132 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3134 int card_table_shift_bits;
3135 gpointer card_table_mask;
3137 MonoInst *dummy_use;
3138 int nursery_shift_bits;
3139 size_t nursery_size;
3140 gboolean has_card_table_wb = FALSE;
3142 if (!cfg->gen_write_barriers)
3145 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3147 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3149 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3150 has_card_table_wb = TRUE;
3153 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3156 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3157 wbarrier->sreg1 = ptr->dreg;
3158 wbarrier->sreg2 = value->dreg;
3159 MONO_ADD_INS (cfg->cbb, wbarrier);
3160 } else if (card_table) {
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1 */
3161 int offset_reg = alloc_preg (cfg);
3162 int card_reg = alloc_preg (cfg);
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3166 if (card_table_mask)
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3169 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3170 * IMM's larger than 32bits.
3172 if (cfg->compile_aot) {
3173 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3175 MONO_INST_NEW (cfg, ins, OP_PCONST);
3176 ins->inst_p0 = card_table;
3177 ins->dreg = card_reg;
3178 MONO_ADD_INS (cfg->cbb, ins);
3181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic GC write barrier on PTR. */
3184 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3185 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3188 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an inline, write-barrier-aware copy of SIZE bytes of a KLASS
 * valuetype from iargs[1] to iargs[0].  Small copies are unrolled with a
 * barrier per reference slot (per the bitmap from
 * create_write_barrier_bitmap); larger ones call
 * mono_gc_wbarrier_value_copy_bitmap.  Bails out (returning to the caller's
 * generic path) if alignment or size make inlining unsafe.
 */
3192 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3194 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3195 unsigned need_wb = 0;
3200 /*types with references can't have alignment smaller than sizeof(void*) */
3201 if (align < SIZEOF_VOID_P)
3204 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3205 if (size > 32 * SIZEOF_VOID_P)
3208 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3210 /* We don't unroll more than 5 stores to avoid code bloat. */
3211 if (size > 5 * SIZEOF_VOID_P) {
3212 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3213 size += (SIZEOF_VOID_P - 1);
3214 size &= ~(SIZEOF_VOID_P - 1);
3216 EMIT_NEW_ICONST (cfg, iargs [2], size);
3217 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3218 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3222 destreg = iargs [0]->dreg;
3223 srcreg = iargs [1]->dreg;
3226 dest_ptr_reg = alloc_preg (cfg);
3227 tmp_reg = alloc_preg (cfg);
3230 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; barrier only on slots flagged in need_wb
 * (the bit test guarding emit_write_barrier is elided in this excerpt). */
3232 while (size >= SIZEOF_VOID_P) {
3233 MonoInst *load_inst;
3234 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3235 load_inst->dreg = tmp_reg;
3236 load_inst->inst_basereg = srcreg;
3237 load_inst->inst_offset = offset;
3238 MONO_ADD_INS (cfg->cbb, load_inst);
3240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3243 emit_write_barrier (cfg, iargs [0], load_inst);
3245 offset += SIZEOF_VOID_P;
3246 size -= SIZEOF_VOID_P;
3249 /*tmp += sizeof (void*)*/
3250 if (size >= SIZEOF_VOID_P) {
3251 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3252 MONO_ADD_INS (cfg->cbb, iargs [0]);
3256 /* Those cannot be references since size < sizeof (void*) */
3258 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3282 * Emit code to copy a valuetype of type @klass whose address is stored in
3283 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) layout size.  Under gsharedvt the
 * size and memcpy routine come from the runtime generic context.  When the
 * type has references (and the store is not provably to the stack), a
 * barrier-aware copy or mono_value_copy icall is used instead of raw memcpy. */
3286 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3288 MonoInst *iargs [4];
3289 int context_used, n;
3291 MonoMethod *memcpy_method;
3292 MonoInst *size_ins = NULL;
3293 MonoInst *memcpy_ins = NULL;
3296 if (cfg->generic_sharing_context)
3297 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3300 * This check breaks with spilled vars... need to handle it during verification anyway.
3301 * g_assert (klass && klass == src->klass && klass == dest->klass);
3304 if (mini_is_gsharedvt_klass (cfg, klass)) {
3306 context_used = mini_class_check_context_used (cfg, klass);
3307 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3308 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3312 n = mono_class_native_size (klass, &align);
3314 n = mono_class_value_size (klass, &align);
3316 /* if native is true there should be no references in the struct */
3317 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3318 /* Avoid barriers when storing to the stack */
3319 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3320 (dest->opcode == OP_LDADDR))) {
3326 context_used = mini_class_check_context_used (cfg, klass);
3328 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3329 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3331 } else if (context_used) {
3332 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3334 if (cfg->compile_aot) {
3335 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3337 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3338 mono_class_compute_gc_descriptor (klass);
3343 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3345 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references: plain memcpy, inlined when small and intrinsics are on. */
3350 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3351 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3352 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3357 iargs [2] = size_ins;
3359 EMIT_NEW_ICONST (cfg, iargs [2], n);
3361 memcpy_method = get_memcpy_method ();
3363 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3365 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (lazily caching) the managed String.memset(3) helper from corlib;
 * aborts if the corlib is too old to provide it.
 */
3370 get_memset_method (void)
3372 static MonoMethod *memset_method = NULL;
3373 if (!memset_method) {
3374 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3376 g_error ("Old corlib found. Install a new one");
3378 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code zero-initializing a KLASS valuetype at DEST->dreg.  Under
 * gsharedvt the size and a bzero routine come from the rgctx; otherwise
 * small types are zeroed inline and larger ones via the managed memset
 * helper.
 */
3382 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3384 MonoInst *iargs [3];
3385 int n, context_used;
3387 MonoMethod *memset_method;
3388 MonoInst *size_ins = NULL;
3389 MonoInst *bzero_ins = NULL;
3390 static MonoMethod *bzero_method;
3392 /* FIXME: Optimize this for the case when dest is an LDADDR */
3393 mono_class_init (klass);
3394 if (mini_is_gsharedvt_klass (cfg, klass)) {
3395 context_used = mini_class_check_context_used (cfg, klass);
3396 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3397 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3399 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3400 g_assert (bzero_method);
3402 iargs [1] = size_ins;
3403 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3407 n = mono_class_value_size (klass, &align);
/* Small enough: inline memset(dest, 0, n). */
3409 if (n <= sizeof (gpointer) * 8) {
3410 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3413 memset_method = get_memset_method ();
3415 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3416 EMIT_NEW_ICONST (cfg, iargs [2], n);
3417 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR producing the runtime generic context for METHOD: the mrgctx for
 * generic methods, the class vtable for static/valuetype methods, and the
 * vtable loaded from 'this' otherwise.  Requires generic sharing to be
 * active.
 */
3422 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3424 MonoInst *this = NULL;
3426 g_assert (cfg->generic_sharing_context);
3428 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3429 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3430 !method->klass->valuetype)
3431 EMIT_NEW_ARGLOAD (cfg, this, 0);
3433 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3434 MonoInst *mrgctx_loc, *mrgctx_var;
3437 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The mrgctx was stashed in the shared vtable variable by the prolog. */
3439 mrgctx_loc = mono_get_vtable_var (cfg);
3440 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3443 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3444 MonoInst *vtable_loc, *vtable_var;
3448 vtable_loc = mono_get_vtable_var (cfg);
3449 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3451 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3452 MonoInst *mrgctx_var = vtable_var;
/* The var holds an mrgctx here; deref it to get the class vtable. */
3455 vtable_reg = alloc_preg (cfg);
3456 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3457 vtable_var->type = STACK_PTR;
/* Instance method: read the vtable straight out of 'this'. */
3465 vtable_reg = alloc_preg (cfg);
3466 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate a MonoJumpInfoRgctxEntry from MP describing an rgctx slot lookup:
 * which METHOD's context (mrgctx iff IN_MRGCTX), what the patch data is, and
 * the kind of information (INFO_TYPE) stored in the slot.
 */
3471 static MonoJumpInfoRgctxEntry *
3472 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3474 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3475 res->method = method;
3476 res->in_mrgctx = in_mrgctx;
3477 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3478 res->data->type = patch_type;
3479 res->data->data.target = patch_data;
3480 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline resolving ENTRY against the
 * runtime generic context in RGCTX. */
3485 static inline MonoInst*
3486 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3488 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR loading the RGCTX_TYPE property of KLASS from the runtime generic
 * context of the current method. */
3492 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3493 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3495 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3496 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3498 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE property of signature SIG from the runtime
 * generic context of the current method. */
3502 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3503 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3505 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3506 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3508 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading gsharedvt call info (SIG + CMETHOD pair) of kind RGCTX_TYPE
 * from the runtime generic context of the current method. */
3512 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3513 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3515 MonoJumpInfoGSharedVtCall *call_info;
3516 MonoJumpInfoRgctxEntry *entry;
3519 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3520 call_info->sig = sig;
3521 call_info->method = cmethod;
3523 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3524 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3526 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the MonoGSharedVtMethodInfo for CMETHOD from the runtime
 * generic context of the current method. */
3531 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3532 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3534 MonoJumpInfoRgctxEntry *entry;
3537 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3538 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3540 return emit_rgctx_fetch (cfg, rgctx, entry);
3544 * emit_get_rgctx_method:
3546 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3547 * normal constants, else emit a load from the rgctx.
3550 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3551 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3553 if (!context_used) {
/* Not context-dependent: a plain constant suffices. */
3556 switch (rgctx_type) {
3557 case MONO_RGCTX_INFO_METHOD:
3558 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3560 case MONO_RGCTX_INFO_METHOD_RGCTX:
3561 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3564 g_assert_not_reached ();
3567 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3568 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3570 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to fetch the rgctx-info value of kind RGCTX_TYPE for FIELD from the
 * runtime generic context of the current method.
 */
3575 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3576 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3578 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3579 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3581 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the slot index of the (RGCTX_TYPE, DATA) entry in cfg->gsharedvt_info,
 * adding a new entry if none exists. Existing entries are reused except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a fresh slot.
 */
3585 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3587 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3588 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an identical existing entry to deduplicate. */
3593 for (i = 0; i < info->num_entries; ++i) {
3594 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3596 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array when full; doubling, starting at 16. The old array is
 * mempool-allocated, so it is simply abandoned, not freed. */
3600 if (info->num_entries == info->count_entries) {
3601 MonoRuntimeGenericContextInfoTemplate *new_entries;
3602 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3604 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3606 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3607 info->entries = new_entries;
3608 info->count_entries = new_count_entries;
/* Append the new entry at the end and return its index. */
3611 idx = info->num_entries;
3612 template = &info->entries [idx];
3613 template->info_type = rgctx_type;
3614 template->data = data;
3616 info->num_entries ++;
3622 * emit_get_gsharedvt_info:
3624 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3627 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or create) the slot for (DATA, RGCTX_TYPE) in the per-method info. */
3632 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3633 /* Load info->entries [idx] */
3634 dreg = alloc_preg (cfg);
3635 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3641 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3643 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3647 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic-class-init trampoline for KLASS. The vtable is
 * passed either as an rgctx fetch (shared code) or as a vtable constant.
 */
3650 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3652 MonoInst *vtable_arg;
3656 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code path: the concrete vtable is only known at run time. */
3659 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3660 klass, MONO_RGCTX_INFO_VTABLE);
3662 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3666 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant of the trampoline with a different signature. */
3669 if (COMPILE_LLVM (cfg))
3670 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3672 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
/* On arches with a dedicated vtable register, pass the vtable in it. */
3673 #ifdef MONO_ARCH_VTABLE_REG
3674 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3675 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a sequence point at IL offset IP for debugger support, but only when
 * sequence points are enabled and METHOD is the method being compiled (not an
 * inlined callee). INTR_LOC marks the point as an interruptible location;
 * NONEMPTY_STACK records that the IL evaluation stack is not empty here.
 */
3682 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3686 if (cfg->gen_seq_points && cfg->method == method) {
3687 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3689 ins->flags |= MONO_INST_NONEMPTY_STACK;
3690 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When the --debug=casts option is active, emit IR that records the source and
 * destination classes of a cast into the JIT TLS data (class_cast_from /
 * class_cast_to), so a failing cast can produce a detailed error message.
 * If NULL_CHECK is set, the bookkeeping is skipped for null objects.
 * OUT_BBLOCK, if non-NULL, receives the current basic block on return.
 */
3695 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3697 if (mini_get_debug_options ()->better_cast_details) {
3698 int vtable_reg = alloc_preg (cfg);
3699 int klass_reg = alloc_preg (cfg);
3700 MonoBasicBlock *is_null_bb = NULL;
3702 int to_klass_reg, context_used;
/* Skip the details for a null object: there is no source class to record. */
3705 NEW_BBLOCK (cfg, is_null_bb);
3707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3708 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3711 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): message string ends with "\n." — trailing period after the
 * newline looks accidental, but changing a runtime string is out of scope here. */
3713 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3717 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's actual class: obj->vtable->klass -> class_cast_from. */
3718 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3721 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3723 context_used = mini_class_check_context_used (cfg, klass);
3725 MonoInst *class_ins;
/* Shared code: the target class must come from the rgctx. */
3727 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3728 to_klass_reg = class_ins->dreg;
3730 to_klass_reg = alloc_preg (cfg);
3731 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3736 MONO_START_BB (cfg, is_null_bb);
3738 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 * Counterpart of save_cast_details (): clear the recorded cast info in the
 * JIT TLS data once the cast has been checked.
 */
3744 reset_cast_details (MonoCompile *cfg)
3746 /* Reset the variables holding the cast details */
3747 if (mini_get_debug_options ()->better_cast_details) {
3748 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3750 MONO_ADD_INS (cfg->cbb, tls_get);
3751 /* It is enough to reset the from field */
3752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3757 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check that OBJ's dynamic type is exactly ARRAY_CLASS, and
 * throw ArrayTypeMismatchException otherwise. The comparison strategy depends
 * on the compilation mode: class comparison under MONO_OPT_SHARED, rgctx-based
 * vtable comparison for shared generic code, vtable comparison otherwise.
 */
3760 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3762 int vtable_reg = alloc_preg (cfg);
3765 context_used = mini_class_check_context_used (cfg, array_class);
3767 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3769 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3771 if (cfg->opt & MONO_OPT_SHARED) {
/* Domain-shared code cannot bake in a vtable pointer, compare classes instead. */
3772 int class_reg = alloc_preg (cfg);
3773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3774 if (cfg->compile_aot) {
3775 int klass_reg = alloc_preg (cfg);
3776 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3777 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3781 } else if (context_used) {
/* Generic-shared code: fetch the expected vtable from the rgctx. */
3782 MonoInst *vtable_ins;
3784 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3785 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3787 if (cfg->compile_aot) {
/* mono_class_vtable () can fail on load errors; caller checks array_class. */
3791 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3793 vt_reg = alloc_preg (cfg);
3794 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3795 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3798 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3804 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3806 reset_cast_details (cfg);
3810 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3811 * generic code is generated.
3814 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing a Nullable<T> is done by calling the managed Nullable<T>.Unbox method. */
3816 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3819 MonoInst *rgctx, *addr;
3821 /* FIXME: What if the class is shared? We might not
3822 have to get the address of the method from the
/* Shared code: get Unbox's code address from the rgctx and do an indirect call. */
3824 addr = emit_get_rgctx_method (cfg, context_used, method,
3825 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3827 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3829 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3831 gboolean pass_vtable, pass_mrgctx;
3832 MonoInst *rgctx_arg = NULL;
3834 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3835 g_assert (!pass_mrgctx);
/* The target may still need its vtable passed as an implicit argument. */
3838 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3841 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3844 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 * Emit IR for the CIL unbox opcode: verify that the boxed object on the stack
 * (*SP) is an instance of KLASS (throwing InvalidCastException otherwise) and
 * compute the address of the unboxed value (object pointer + sizeof(MonoObject)).
 */
3849 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3853 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3854 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3855 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3856 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3858 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3862 /* FIXME: generics */
3863 g_assert (klass->rank == 0);
/* Boxed valuetypes are never arrays: rank must be 0 or the cast fails. */
3866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3867 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3869 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3873 MonoInst *element_class;
3875 /* This assertion is from the unboxcast insn */
3876 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3878 element_class = emit_get_rgctx_klass (cfg, context_used,
3879 klass->element_class, MONO_RGCTX_INFO_KLASS);
3881 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3882 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3884 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3885 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3886 reset_cast_details (cfg);
/* The unboxed data starts right after the MonoObject header. */
3889 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3890 MONO_ADD_INS (cfg->cbb, add);
3891 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 * Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete identity
 * (value type, reference type, or Nullable) is only known at run time. The
 * runtime box-type classifier (MONO_RGCTX_INFO_CLASS_BOX_TYPE) selects one of
 * three code paths at run time; all paths leave the address of the value in
 * addr_reg, from which the result is finally loaded. OUT_CBB receives the
 * basic block that is current on return.
 */
3898 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3900 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3901 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3905 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3911 args [1] = klass_inst;
/* Dynamic cast check via icall; throws on mismatch. */
3914 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3916 NEW_BBLOCK (cfg, is_ref_bb);
3917 NEW_BBLOCK (cfg, is_nullable_bb);
3918 NEW_BBLOCK (cfg, end_bb);
/* Box-type classifier: compared against 1 (reference) and 2 (nullable) below;
 * any other value falls through to the plain-vtype path. */
3919 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3926 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3927 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Plain vtype path: value lives right after the MonoObject header. */
3931 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3932 MONO_ADD_INS (cfg->cbb, addr);
3934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3937 MONO_START_BB (cfg, is_ref_bb);
3939 /* Save the ref to a temporary */
3940 dreg = alloc_ireg (cfg);
3941 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3942 addr->dreg = addr_reg;
3943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3947 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox through an address from the rgctx,
 * using a hand-built signature since the concrete method is unknown at JIT time. */
3950 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3951 MonoInst *unbox_call;
3952 MonoMethodSignature *unbox_sig;
3955 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3957 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3958 unbox_sig->ret = &klass->byval_arg;
3959 unbox_sig->param_count = 1;
3960 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3961 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3963 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3964 addr->dreg = addr_reg;
3967 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3970 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value from the computed address. */
3973 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3975 *out_cbb = cfg->cbb;
3981 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit IR to allocate an instance of KLASS. FOR_BOX indicates the allocation
 * is for a box operation. Depending on compilation mode this uses a managed
 * allocator, a specialized mscorlib helper, or one of the mono_object_new*
 * icalls; shared generic code fetches the klass/vtable from the rgctx.
 */
3984 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3986 MonoInst *iargs [2];
3992 MonoInst *iargs [2];
/* gsharedvt klasses have unknown instance size at JIT time. */
3993 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3995 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Shared-code path: decide what to fetch from the rgctx. */
3997 if (cfg->opt & MONO_OPT_SHARED)
3998 rgctx_info = MONO_RGCTX_INFO_KLASS;
4000 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4001 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4003 if (cfg->opt & MONO_OPT_SHARED) {
4004 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4006 alloc_ftn = mono_object_new;
4009 alloc_ftn = mono_object_new_specific;
4012 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4013 if (known_instance_size)
4014 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4015 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4018 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4021 if (cfg->opt & MONO_OPT_SHARED) {
4022 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4023 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4025 alloc_ftn = mono_object_new;
4026 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4027 /* This happens often in argument checking code, eg. throw new FooException... */
4028 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4029 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4030 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4032 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4033 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
4037 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4038 cfg->exception_ptr = klass;
4042 #ifndef MONO_CROSS_COMPILE
4043 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4046 if (managed_alloc) {
4047 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4048 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
4049 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4051 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words ("lw") as first argument. */
4053 guint32 lw = vtable->klass->instance_size;
4054 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4055 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4056 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4059 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4063 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4067 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit IR to box VAL into an object of type KLASS. Nullable types go through
 * the managed Nullable<T>.Box method; gsharedvt types branch at run time on
 * the box-type classifier (vtype / reference / nullable); everything else is
 * allocate-then-store. OUT_CBB receives the basic block current on return.
 */
4070 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4072 MonoInst *alloc, *ins;
4074 *out_cbb = cfg->cbb;
4076 if (mono_class_is_nullable (klass)) {
4077 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4080 /* FIXME: What if the class is shared? We might not
4081 have to get the method address from the RGCTX. */
/* Shared code: indirect call through the code address from the rgctx. */
4082 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4083 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4084 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4086 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4088 gboolean pass_vtable, pass_mrgctx;
4089 MonoInst *rgctx_arg = NULL;
4091 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4092 g_assert (!pass_mrgctx);
4095 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4098 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4101 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4105 if (mini_is_gsharedvt_klass (cfg, klass)) {
4106 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4107 MonoInst *res, *is_ref, *src_var, *addr;
4110 dreg = alloc_ireg (cfg);
4112 NEW_BBLOCK (cfg, is_ref_bb);
4113 NEW_BBLOCK (cfg, is_nullable_bb);
4114 NEW_BBLOCK (cfg, end_bb);
/* Box-type classifier: 1 = reference type, 2 = nullable, else plain vtype. */
4115 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Plain vtype path: allocate and store the value after the object header. */
4123 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4126 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4127 ins->opcode = OP_STOREV_MEMBASE;
4129 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4130 res->type = STACK_OBJ;
4132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4135 MONO_START_BB (cfg, is_ref_bb);
4136 addr_reg = alloc_ireg (cfg);
4138 /* val is a vtype, so has to load the value manually */
4139 src_var = get_vreg_to_inst (cfg, val->dreg);
4141 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4142 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4147 MONO_START_BB (cfg, is_nullable_bb);
4150 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4151 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4153 MonoMethodSignature *box_sig;
4156 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4157 * construct that method at JIT time, so have to do things by hand.
4159 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4160 box_sig->ret = &mono_defaults.object_class->byval_arg;
4161 box_sig->param_count = 1;
4162 box_sig->params [0] = &klass->byval_arg;
4163 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4164 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4165 res->type = STACK_OBJ;
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4171 MONO_START_BB (cfg, end_bb);
4173 *out_cbb = cfg->cbb;
/* Non-gsharedvt, non-nullable path: allocate and store. */
4177 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4181 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS is a generic instance (or, in shared code, an open
 * generic type) with at least one covariant/contravariant type argument that
 * is a reference type. Such classes need the slower cache-based cast helpers.
 */
4188 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4191 MonoGenericContainer *container;
4192 MonoGenericInst *ginst;
4194 if (klass->generic_class) {
4195 container = klass->generic_class->container_class->generic_container;
4196 ginst = klass->generic_class->context.class_inst;
4197 } else if (klass->generic_container && context_used) {
4198 container = klass->generic_container;
4199 ginst = container->context.class_inst;
/* Check each variant parameter's corresponding argument. */
4204 for (i = 0; i < container->type_argc; ++i) {
4206 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4208 type = ginst->type_argv [i];
4209 if (mini_type_is_reference (cfg, type))
/* True when KLASS needs the general (cache/helper based) isinst/castclass path:
 * interfaces, arrays, nullables, MBR types, sealed types, and type variables. */
4215 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 * Emit a call to the castclass-with-cache marshalling wrapper for KLASS,
 * wrapped in save/reset of the detailed cast-error bookkeeping.
 * OUT_BBLOCK receives the basic block current on return.
 */
4218 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4220 MonoMethod *mono_castclass;
4223 mono_castclass = mono_marshal_get_castclass_with_cache ();
4225 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4226 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4227 reset_cast_details (cfg);
4228 *out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 *
 * Non-shared-code front end for emit_castclass_with_cache (): build the
 * argument array with a class constant and a per-call-site cache slot.
 */
4234 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4243 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4246 if (cfg->compile_aot) {
4247 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4248 cfg->castclass_cache_index ++;
/* Combine method index and per-method counter into one unique id. */
4249 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4250 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
/* JIT mode: the cache slot is just a domain-allocated pointer-sized cell. */
4252 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4255 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4257 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4261 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 * Emit IR for the CIL castclass opcode on SRC with target KLASS. Fast paths
 * use inline vtable/class comparisons; variant generic interfaces, interfaces
 * and MBR types fall back to cache-based or inlined marshalling helpers.
 * OUT_BB receives the current basic block; INLINE_COSTS is incremented by the
 * cost of any code inlined here.
 */
4264 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4266 MonoBasicBlock *is_null_bb;
4267 int obj_reg = src->dreg;
4268 int vtable_reg = alloc_preg (cfg);
4270 MonoInst *klass_inst = NULL, *res;
4271 MonoBasicBlock *bblock;
4275 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared, variant generic argument: use the cache-based helper. */
4277 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4278 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4279 (*inline_costs) += 2;
/* Non-shared MBR or interface cast: inline the castclass wrapper. */
4282 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4283 MonoMethod *mono_castclass;
4284 MonoInst *iargs [1];
4287 mono_castclass = mono_marshal_get_castclass (klass);
4290 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4291 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4292 iargs, ip, cfg->real_offset, TRUE, &bblock);
4293 reset_cast_details (cfg);
4294 CHECK_CFG_EXCEPTION;
4295 g_assert (costs > 0);
4297 cfg->real_offset += 5;
4299 (*inline_costs) += costs;
/* Shared-code complex casts: cache-based helper with rgctx-supplied cache. */
4308 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4309 MonoInst *cache_ins;
4311 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4316 /* klass - it's the second element of the cache entry*/
4317 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4320 args [2] = cache_ins;
4322 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4325 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline fast path: null objects always pass a castclass. */
4328 NEW_BBLOCK (cfg, is_null_bb);
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4333 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4335 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4337 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4339 int klass_reg = alloc_preg (cfg);
4341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow an exact class/vtable comparison. */
4343 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4344 /* the remoting code is broken, access the class for now */
4345 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4346 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4348 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4349 cfg->exception_ptr = klass;
4352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4357 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4360 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4364 MONO_START_BB (cfg, is_null_bb);
4366 reset_cast_details (cfg);
4377 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 * Emit IR for the CIL isinst opcode on SRC with target KLASS. The result is
 * SRC itself on success and NULL on failure (per ECMA-335). Complex types use
 * the cache-based isinst wrapper; simpler cases get inline vtable/class tests
 * branching to false_bb (fail) or is_null_bb (succeed, result already moved).
 */
4380 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4383 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4384 int obj_reg = src->dreg;
4385 int vtable_reg = alloc_preg (cfg);
4386 int res_reg = alloc_ireg_ref (cfg);
4387 MonoInst *klass_inst = NULL;
/* Complex targets: call the isinst-with-cache wrapper. */
4392 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4393 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4394 MonoInst *cache_ins;
4396 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4401 /* klass - it's the second element of the cache entry*/
4402 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4405 args [2] = cache_ins;
4407 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4410 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4413 NEW_BBLOCK (cfg, is_null_bb);
4414 NEW_BBLOCK (cfg, false_bb);
4415 NEW_BBLOCK (cfg, end_bb);
4417 /* Do the assignment at the beginning, so the other assignment can be if converted */
4418 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4419 ins->type = STACK_OBJ;
/* isinst on a null reference yields null. */
4422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4427 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4428 g_assert (!context_used);
4429 /* the is_null_bb target simply copies the input register to the output */
4430 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4432 int klass_reg = alloc_preg (cfg);
/* Array target: compare rank, then check the element class. */
4435 int rank_reg = alloc_preg (cfg);
4436 int eclass_reg = alloc_preg (cfg);
4438 g_assert (!context_used);
4439 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4443 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with non-trivial array variance rules. */
4444 if (klass->cast_class == mono_defaults.object_class) {
4445 int parent_reg = alloc_preg (cfg);
4446 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4447 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4448 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4450 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4451 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4452 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4454 } else if (klass->cast_class == mono_defaults.enum_class) {
4455 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4456 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4457 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4458 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4460 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4461 /* Check that the object is a vector too */
4462 int bounds_reg = alloc_preg (cfg);
4463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4464 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4465 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4468 /* the is_null_bb target simply copies the input register to the output */
4469 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4471 } else if (mono_class_is_nullable (klass)) {
4472 g_assert (!context_used);
4473 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4474 /* the is_null_bb target simply copies the input register to the output */
4475 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-generic target: a single exact comparison suffices. */
4477 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4478 g_assert (!context_used);
4479 /* the remoting code is broken, access the class for now */
4480 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4481 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4483 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4484 cfg->exception_ptr = klass;
4487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4493 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4496 /* the is_null_bb target simply copies the input register to the output */
4497 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is NULL. */
4502 MONO_START_BB (cfg, false_bb);
4504 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4507 MONO_START_BB (cfg, is_null_bb);
4509 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 * Emit IR for the internal CEE_MONO_CISINST opcode; see the comment below for
 * the 0/1/2 result encoding. The remoting (transparent proxy) paths are only
 * built when DISABLE_REMOTING is not defined.
 */
4515 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4517 /* This opcode takes as input an object reference and a class, and returns:
4518 0) if the object is an instance of the class,
4519 1) if the object is not instance of the class,
4520 2) if the object is a proxy whose type cannot be determined */
4523 #ifndef DISABLE_REMOTING
4524 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4526 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4528 int obj_reg = src->dreg;
4529 int dreg = alloc_ireg (cfg);
4531 #ifndef DISABLE_REMOTING
4532 int klass_reg = alloc_preg (cfg);
4535 NEW_BBLOCK (cfg, true_bb);
4536 NEW_BBLOCK (cfg, false_bb);
4537 NEW_BBLOCK (cfg, end_bb);
4538 #ifndef DISABLE_REMOTING
4539 NEW_BBLOCK (cfg, false2_bb);
4540 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null input: not an instance (result 1 via false_bb). */
4543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4546 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4547 #ifndef DISABLE_REMOTING
4548 NEW_BBLOCK (cfg, interface_fail_bb);
4551 tmp_reg = alloc_preg (cfg);
4552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4553 #ifndef DISABLE_REMOTING
/* Interface check first; failures may still be an undetermined proxy. */
4554 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4555 MONO_START_BB (cfg, interface_fail_bb);
4556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4558 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Transparent proxy without custom type info -> result 2 (undetermined). */
4560 tmp_reg = alloc_preg (cfg);
4561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4565 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4568 #ifndef DISABLE_REMOTING
4569 tmp_reg = alloc_preg (cfg);
4570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4573 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy object: test against the remote class's proxy_class instead. */
4574 tmp_reg = alloc_preg (cfg);
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4578 tmp_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4583 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4584 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4586 MONO_START_BB (cfg, no_proxy_bb);
4588 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4590 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false=1, false2=2 (proxy undetermined), true=0. */
4594 MONO_START_BB (cfg, false_bb);
4596 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4599 #ifndef DISABLE_REMOTING
4600 MONO_START_BB (cfg, false2_bb);
4602 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4606 MONO_START_BB (cfg, true_bb);
4608 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4610 MONO_START_BB (cfg, end_bb);
4613 MONO_INST_NEW (cfg, ins, OP_ICONST);
4615 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the CEE_MONO_CCASTCLASS opcode: checked cast of SRC
 * to KLASS which also understands transparent proxies. Result contract is in
 * the comment below. NOTE(review): #else/#endif and some intervening lines
 * are missing from this view.
 */
4621 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4623 /* This opcode takes as input an object reference and a class, and returns:
4624 0) if the object is an instance of the class,
4625 1) if the object is a proxy whose type cannot be determined
4626 an InvalidCastException exception is thrown otherwise */
4629 #ifndef DISABLE_REMOTING
4630 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4632 MonoBasicBlock *ok_result_bb;
4634 int obj_reg = src->dreg;
4635 int dreg = alloc_ireg (cfg);
4636 int tmp_reg = alloc_preg (cfg);
4638 #ifndef DISABLE_REMOTING
4639 int klass_reg = alloc_preg (cfg);
4640 NEW_BBLOCK (cfg, end_bb);
4643 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
4645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a useful message. */
4648 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4650 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4651 #ifndef DISABLE_REMOTING
4652 NEW_BBLOCK (cfg, interface_fail_bb);
4654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4655 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy may still pass. */
4656 MONO_START_BB (cfg, interface_fail_bb);
4657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Throws InvalidCastException unless the object is a transparent proxy. */
4659 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot be cast -> throw. */
4661 tmp_reg = alloc_preg (cfg);
4662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4664 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (type cannot be determined). */
4666 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4667 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4670 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4674 #ifndef DISABLE_REMOTING
4675 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: dispatch on whether the object is a proxy. */
4677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4679 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote (proxied) class. */
4681 tmp_reg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4685 tmp_reg = alloc_preg (cfg);
4686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4690 NEW_BBLOCK (cfg, fail_1_bb);
4692 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type cannot be determined -> 1. */
4694 MONO_START_BB (cfg, fail_1_bb);
4696 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: normal castclass semantics (throws on failure). */
4699 MONO_START_BB (cfg, no_proxy_bb);
4701 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4703 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: the cast succeeded -> 0. */
4707 MONO_START_BB (cfg, ok_result_bb);
4709 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4711 #ifndef DISABLE_REMOTING
4712 MONO_START_BB (cfg, end_bb);
/* Materialize the result on the evaluation stack as a 32 bit integer. */
4716 MONO_INST_NEW (cfg, ins, OP_ICONST);
4718 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag: compute (*enum_this & enum_flag) ==
 * enum_flag on the enum's underlying integer type, yielding an I4 boolean.
 * Uses 32 bit ops when the underlying type fits in 32 bits, 64 bit ops
 * otherwise (is_i4 selects between the two).
 * NOTE(review): the switch cases that clear is_i4 are missing from this view.
 */
4723 static G_GNUC_UNUSED MonoInst*
4724 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4726 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4727 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4728 gboolean is_i4 = TRUE;
4730 switch (enum_type->type) {
4733 #if SIZEOF_REGISTER == 8
4742 MonoInst *load, *and, *cmp, *ceq;
4743 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4744 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4745 int dest_reg = alloc_ireg (cfg);
/* load this; and = this & flag; result = (and == flag). */
4747 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4748 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4749 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4750 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4752 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only low-level opcodes. */
4755 load = mono_decompose_opcode (cfg, load, NULL);
4756 and = mono_decompose_opcode (cfg, and, NULL);
4757 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4758 ceq = mono_decompose_opcode (cfg, ceq, NULL);
* Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS and initializes it
 * inline (instead of calling mono_delegate_ctor): the target, method,
 * method_code slot and the invoke_impl/method_ptr trampoline fields.
 * VIRTUAL selects the virtual-delegate trampoline.
 * NOTE(review): #else/#endif and closing lines are missing from this view.
 */
4768 static G_GNUC_UNUSED MonoInst*
4769 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4773 gpointer trampoline;
4774 MonoInst *obj, *method_ins, *tramp_ins;
4778 // FIXME reenable optimisation for virtual case
4783 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4786 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate object itself. */
4790 obj = handle_alloc (cfg, klass, FALSE, 0);
4794 /* Inline the contents of mono_delegate_ctor */
4796 /* Set target field */
4797 /* Optimize away setting of NULL target */
4798 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store is a ref store into a new object: needs a write barrier. */
4800 if (cfg->gen_write_barriers) {
4801 dreg = alloc_preg (cfg);
4802 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4803 emit_write_barrier (cfg, ptr, target);
4807 /* Set method field */
4808 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4812 * To avoid looking up the compiled code belonging to the target method
4813 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4814 * store it, and we fill it after the method has been compiled.
4816 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4817 MonoInst *code_slot_ins;
4820 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or find) the per-domain code slot under the domain lock. */
4822 domain = mono_domain_get ();
4823 mono_domain_lock (domain);
4824 if (!domain_jit_info (domain)->method_code_hash)
4825 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4826 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4828 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4829 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4831 mono_domain_unlock (domain);
/* Under AOT the slot address is resolved at load time via a patch. */
4833 if (cfg->compile_aot)
4834 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4836 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT patch vs. JIT-time creation. */
4841 if (cfg->compile_aot) {
4842 MonoDelegateClassMethodPair *del_tramp;
4844 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4845 del_tramp->klass = klass;
4846 del_tramp->method = context_used ? NULL : method;
4847 del_tramp->virtual = virtual;
4848 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4851 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4853 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4854 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4857 /* Set invoke_impl field */
4859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Otherwise tramp_ins points at a MonoDelegateTrampInfo: copy its fields. */
4861 dreg = alloc_preg (cfg);
4862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4865 dreg = alloc_preg (cfg);
4866 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4867 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4870 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall for a RANK-dimensional array
 * allocation; SP holds the dimension arguments, IP is the current IL offset.
 * The icall uses a vararg calling convention, which LLVM cannot compile.
 */
4876 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4878 MonoJitICallInfo *info;
4880 /* Need to register the icall so it gets an icall wrapper */
4881 info = mono_get_array_new_va_icall (rank);
4883 cfg->flags |= MONO_CFG_HAS_VARARGS;
4885 /* mono_array_new_va () needs a vararg calling convention */
4886 cfg->disable_llvm = TRUE;
4888 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4889 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4893 * handle_constrained_gsharedvt_call:
4895 * Handle constrained calls where the receiver is a gsharedvt type.
4896 * Return the instruction representing the call. Set the cfg exception on failure.
4899 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4900 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4902 MonoInst *ins = NULL;
4903 MonoBasicBlock *bblock = *ref_bblock;
4904 gboolean emit_widen = *ref_emit_widen;
4907 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
4908 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4909 * pack the arguments into an array, and do the rest of the work in an icall.
/* Guard: only simple signatures (few params, widely-supported ret types) take the icall path. */
4911 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4912 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4913 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4914 MonoInst *args [16];
4917 * This case handles calls to
4918 * - object:ToString()/Equals()/GetHashCode(),
4919 * - System.IComparable<T>:CompareTo()
4920 * - System.IEquatable<T>:Equals ()
4921 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the method, resolved through the rgctx if generic context is used. */
4925 if (mono_method_check_context_used (cmethod))
4926 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4928 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
/* args [2] = the constrained class. */
4929 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4931 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4932 if (fsig->hasthis && fsig->param_count) {
4933 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4934 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4935 ins->dreg = alloc_preg (cfg);
4936 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4937 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus box-type info in args [3]. */
4940 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4943 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4945 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4946 addr_reg = ins->dreg;
4947 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4949 EMIT_NEW_ICONST (cfg, args [3], 0);
4950 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No in-stack arguments: pass null markers. */
4953 EMIT_NEW_ICONST (cfg, args [3], 0);
4954 EMIT_NEW_ICONST (cfg, args [4], 0);
4956 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result: unbox it according to the return type. */
4959 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4960 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4961 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed value, then load it. */
4965 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4966 MONO_ADD_INS (cfg->cbb, add);
4968 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4969 MONO_ADD_INS (cfg->cbb, ins);
4970 /* ins represents the call result */
4973 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Propagate the possibly-updated widen flag and current bblock to the caller. */
4976 *ref_emit_widen = emit_widen;
4977 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry bblock to
 * initialize the GOT variable, and a dummy use in the exit bblock so the
 * variable stays live for the whole method. No-op if there is no got_var
 * or it was already allocated.
 */
4986 mono_emit_load_got_addr (MonoCompile *cfg)
4988 MonoInst *getaddr, *dummy_use;
4990 if (!cfg->got_var || cfg->got_var_allocated)
4993 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4994 getaddr->cil_code = cfg->header->code;
4995 getaddr->dreg = cfg->got_var->dreg;
4997 /* Add it to the start of the first bblock */
4998 if (cfg->bb_entry->code) {
4999 getaddr->next = cfg->bb_entry->code;
5000 cfg->bb_entry->code = getaddr;
5003 MONO_ADD_INS (cfg->bb_entry, getaddr);
5005 cfg->got_var_allocated = TRUE;
5008 * Add a dummy use to keep the got_var alive, since real uses might
5009 * only be generated by the back ends.
5010 * Add it to end_bblock, so the variable's lifetime covers the whole
5012 * It would be better to make the usage of the got var explicit in all
5013 * cases when the backend needs it (i.e. calls, throw etc.), so this
5014 * wouldn't be needed.
5016 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5017 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL size limit for inlining; lazily initialized from the MONO_INLINELIMIT
 * environment variable (default INLINE_LENGTH_LIMIT). */
5020 static int inline_limit;
5021 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Checks compile flags, recursion depth, method attributes, IL size
 * against inline_limit, class-initialization constraints, declarative
 * security and soft-float restrictions.
 * NOTE(review): several return statements and braces fall in lines missing
 * from this view.
 */
5024 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5026 MonoMethodHeaderSummary header;
5028 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5029 MonoMethodSignature *sig = mono_method_signature (method);
/* Global and per-compilation switches that disable inlining outright. */
5033 if (cfg->disable_inline)
5035 if (cfg->generic_sharing_context)
/* Bound the inline recursion depth. */
5038 if (cfg->inline_depth > 10)
5041 #ifdef MONO_ARCH_HAVE_LMF_OPS
5042 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5043 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5044 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5049 if (!mono_method_get_header_summary (method, &header))
5052 /*runtime, icall and pinvoke are checked by summary call*/
5053 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5054 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5055 (mono_class_is_marshalbyref (method->klass)) ||
5059 /* also consider num_locals? */
5060 /* Do the size check early to avoid creating vtables */
5061 if (!inline_limit_inited) {
5062 if (g_getenv ("MONO_INLINELIMIT"))
5063 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5065 inline_limit = INLINE_LENGTH_LIMIT;
5066 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5068 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5072 * if we can initialize the class of the method right away, we do,
5073 * otherwise we don't allow inlining if the class needs initialization,
5074 * since it would mean inserting a call to mono_runtime_class_init()
5075 * inside the inlined code
5077 if (!(cfg->opt & MONO_OPT_SHARED)) {
5078 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5079 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5080 vtable = mono_class_vtable (cfg->domain, method->klass);
5083 if (!cfg->compile_aot)
5084 mono_runtime_class_init (vtable);
5085 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5086 if (cfg->run_cctors && method->klass->has_cctor) {
5087 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5088 if (!method->klass->runtime_info)
5089 /* No vtable created yet */
5091 vtable = mono_class_vtable (cfg->domain, method->klass);
5094 /* This makes so that inline cannot trigger */
5095 /* .cctors: too many apps depend on them */
5096 /* running with a specific order... */
5097 if (! vtable->initialized)
5099 mono_runtime_class_init (vtable);
5101 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5102 if (!method->klass->runtime_info)
5103 /* No vtable created yet */
5105 vtable = mono_class_vtable (cfg->domain, method->klass);
5108 if (!vtable->initialized)
5113 * If we're compiling for shared code
5114 * the cctor will need to be run at aot method load time, for example,
5115 * or at the end of the compilation of the inlining method.
5117 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5122 * CAS - do not inline methods with declarative security
5123 * Note: this has to be before any possible return TRUE;
5125 if (mono_security_method_has_declsec (method))
5128 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: reject methods passing or returning R4 values. */
5129 if (mono_arch_is_soft_float ()) {
5131 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5133 for (i = 0; i < sig->param_count; ++i)
5134 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Methods explicitly blacklisted for this compilation. */
5139 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD on KLASS requires an
 * explicit class-initialization (cctor) call to be emitted.
 * NOTE(review): the return statements fall in lines missing from this view.
 */
5146 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5148 if (!cfg->compile_aot) {
5150 if (vtable->initialized)
5154 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5155 if (cfg->method == method)
5159 if (!mono_class_needs_cctor_run (klass, method))
5162 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5163 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements are of type KLASS; optionally emit a bounds
 * check (BCHECK). Returns an instruction of type STACK_MP pointing into
 * the array's vector.
 */
5170 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5174 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size gsharedvt elements use the rgctx path below for the size. */
5177 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5180 mono_class_init (klass);
5181 size = mono_class_array_element_size (klass);
5184 mult_reg = alloc_preg (cfg);
5185 array_reg = arr->dreg;
5186 index_reg = index->dreg;
5188 #if SIZEOF_REGISTER == 8
5189 /* The array reg is 64 bits but the index reg is only 32 */
5190 if (COMPILE_LLVM (cfg)) {
5192 index2_reg = index_reg;
5194 index2_reg = alloc_preg (cfg);
5195 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5198 if (index->type == STACK_I8) {
5199 index2_reg = alloc_preg (cfg);
5200 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5202 index2_reg = index_reg;
5207 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5209 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64. */
5210 if (size == 1 || size == 2 || size == 4 || size == 8) {
5211 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5213 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5214 ins->klass = mono_class_get_element_class (klass);
5215 ins->type = STACK_MP;
/* Generic path: addr = arr + index * element_size + offsetof (vector). */
5221 add_reg = alloc_ireg_mp (cfg);
5224 MonoInst *rgctx_ins;
/* Gsharedvt: the element size is only known at runtime, fetch it via rgctx. */
5227 g_assert (cfg->generic_sharing_context);
5228 context_used = mini_class_check_context_used (cfg, klass);
5229 g_assert (context_used);
5230 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5231 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5235 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5236 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5237 ins->klass = mono_class_get_element_class (klass);
5238 ins->type = STACK_MP;
5239 MONO_ADD_INS (cfg->cbb, ins);
5244 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element type KLASS, including per-dimension
 * lower-bound adjustment and bounds checks read from the array's bounds
 * table. Only compiled when the arch supports OP_LMUL natively.
 */
5246 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5248 int bounds_reg = alloc_preg (cfg);
5249 int add_reg = alloc_ireg_mp (cfg);
5250 int mult_reg = alloc_preg (cfg);
5251 int mult2_reg = alloc_preg (cfg);
5252 int low1_reg = alloc_preg (cfg);
5253 int low2_reg = alloc_preg (cfg);
5254 int high1_reg = alloc_preg (cfg);
5255 int high2_reg = alloc_preg (cfg);
5256 int realidx1_reg = alloc_preg (cfg);
5257 int realidx2_reg = alloc_preg (cfg);
5258 int sum_reg = alloc_preg (cfg);
5259 int index1, index2, tmpreg;
5263 mono_class_init (klass);
5264 size = mono_class_array_element_size (klass);
5266 index1 = index_ins1->dreg;
5267 index2 = index_ins2->dreg;
5269 #if SIZEOF_REGISTER == 8
5270 /* The array reg is 64 bits but the index reg is only 32 */
5271 if (COMPILE_LLVM (cfg)) {
5274 tmpreg = alloc_preg (cfg);
5275 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5277 tmpreg = alloc_preg (cfg);
5278 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5282 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5286 /* range checking */
5287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5288 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; throw if realidx >= length. */
5290 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5291 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5292 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5293 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5294 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5295 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5296 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second bounds entry. */
5298 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5299 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5300 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5301 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5302 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5303 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5304 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector). */
5306 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5307 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5309 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5310 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5312 ins->type = STACK_MP;
5314 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for a (Get/Set/Address) array
 * accessor CMETHOD. Rank 1 and (on capable archs) rank 2 get fast inline
 * paths; other ranks call the marshalled Address helper. IS_SET drops the
 * trailing value parameter when computing the rank.
 */
5321 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5325 MonoMethod *addr_method;
5327 MonoClass *eclass = cmethod->klass->element_class;
5329 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5332 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5334 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5335 /* emit_ldelema_2 depends on OP_LMUL */
5336 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5337 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5341 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General case: call the generated Address wrapper for this rank/size. */
5344 element_size = mono_class_array_element_size (eclass);
5345 addr_method = mono_marshal_get_array_address (rank, element_size);
5346 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5351 static MonoBreakPolicy
5352 always_insert_breakpoint (MonoMethod *method)
5354 return MONO_BREAK_POLICY_ALWAYS;
/* Current breakpoint policy callback; replaced via mono_set_break_policy (). */
5357 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5360 * mono_set_break_policy:
5361 * policy_callback: the new callback function
5363 * Allow embedders to decide whether to actually obey breakpoint instructions
5364 * (both break IL instructions and Debugger.Break () method calls), for example
5365 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5366 * untrusted or semi-trusted code.
5368 * @policy_callback will be called every time a break point instruction needs to
5369 * be inserted with the method argument being the method that calls Debugger.Break()
5370 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5371 * if it wants the breakpoint to not be effective in the given method.
5372 * #MONO_BREAK_POLICY_ALWAYS is the default.
5375 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5377 if (policy_callback)
5378 break_policy_func = policy_callback;
/* NULL restores the default always-break policy. */
5380 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical identifier, keep for callers)
 *
 *   Consult the registered break policy for METHOD and return whether a
 * breakpoint instruction should actually be emitted.
 * NOTE(review): the per-case return statements are missing from this view.
 */
5384 should_insert_brekpoint (MonoMethod *method) {
5385 switch (break_policy_func (method)) {
5386 case MONO_BREAK_POLICY_ALWAYS:
5388 case MONO_BREAK_POLICY_NEVER:
5390 case MONO_BREAK_POLICY_ON_DBG:
5391 g_warning ("mdb no longer supported");
5394 g_warning ("Incorrect value returned from break policy callback");
5399 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Get/SetGenericValueImpl icalls: compute the element address
 * and copy the value between args [2] and the array slot. IS_SET selects
 * the store direction; reference stores get a write barrier.
 */
5401 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5403 MonoInst *addr, *store, *load;
5404 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5406 /* the bounds check is already done by the callers */
5407 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load the value from args [2] and store it into the array slot. */
5409 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5410 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5411 if (mini_type_is_reference (cfg, fsig->params [2]))
5412 emit_write_barrier (cfg, addr, load);
/* Get: load from the array slot and store into the out argument. */
5414 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5415 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is (or instantiates to) a reference type. */
5422 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5424 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-type stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (covariance check); value types store directly, with
 * an OP_ICONST-index fast path and a write barrier for ref elements.
 */
5428 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Null stores never violate array covariance, so they skip the helper. */
5430 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5431 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5432 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5433 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5434 MonoInst *iargs [3];
5437 mono_class_setup_vtable (obj_array);
5438 g_assert (helper->slot);
5440 if (sp [0]->type != STACK_OBJ)
5442 if (sp [2]->type != STACK_OBJ)
5449 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Gsharedvt element type: address computation plus an OP_STOREV_MEMBASE. */
5453 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5456 // FIXME-VT: OP_ICONST optimization
5457 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5458 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5459 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset computation at compile time. */
5460 } else if (sp [1]->opcode == OP_ICONST) {
5461 int array_reg = sp [0]->dreg;
5462 int index_reg = sp [1]->dreg;
5463 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5466 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5467 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5469 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5470 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5471 if (generic_class_is_reference_type (cfg, klass))
5472 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: an element store or load with no
 * bounds check. For stores the element type comes from params [2]; for
 * loads it is the return type.
 */
5479 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5484 eklass = mono_class_from_mono_type (fsig->params [2]);
5486 eklass = mono_class_from_mono_type (fsig->ret);
5489 return emit_array_store (cfg, eklass, args, FALSE);
5491 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5492 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5498 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5502 //Only allow for valuetypes
5503 if (!param_klass->valuetype || !return_klass->valuetype)
5507 if (param_klass->has_references || return_klass->has_references)
5510 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5511 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5512 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5515 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5516 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5519 //And have the same size
5520 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the source and destination types (or their
 * element types, for rank-1 arrays) are bit-compatible valuetypes; the
 * "move" is then a no-op reinterpretation of args [0].
 */
5526 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5528 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5529 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5531 //Valuetypes that are semantically equivalent
5532 if (is_unsafe_mov_compatible (param_klass, return_klass))
5535 //Arrays of valuetypes that are semantically equivalent
5536 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic: SIMD ctors first
 * (when MONO_OPT_SIMD is enabled), then native-type intrinsics.
 */
5543 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5545 #ifdef MONO_ARCH_SIMD_INTRINSICS
5546 MonoInst *ins = NULL;
5548 if (cfg->opt & MONO_OPT_SIMD) {
5549 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5555 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5559 emit_memory_barrier (MonoCompile *cfg, int kind)
5561 MonoInst *ins = NULL;
5562 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5563 MONO_ADD_INS (cfg->cbb, ins);
5564 ins->backend.memory_barrier_kind = kind;
5570 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5572 MonoInst *ins = NULL;
5575 /* The LLVM backend supports these intrinsics */
5576 if (cmethod->klass == mono_defaults.math_class) {
5577 if (strcmp (cmethod->name, "Sin") == 0) {
5579 } else if (strcmp (cmethod->name, "Cos") == 0) {
5581 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5583 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5587 if (opcode && fsig->param_count == 1) {
5588 MONO_INST_NEW (cfg, ins, opcode);
5589 ins->type = STACK_R8;
5590 ins->dreg = mono_alloc_freg (cfg);
5591 ins->sreg1 = args [0]->dreg;
5592 MONO_ADD_INS (cfg->cbb, ins);
5596 if (cfg->opt & MONO_OPT_CMOV) {
5597 if (strcmp (cmethod->name, "Min") == 0) {
5598 if (fsig->params [0]->type == MONO_TYPE_I4)
5600 if (fsig->params [0]->type == MONO_TYPE_U4)
5601 opcode = OP_IMIN_UN;
5602 else if (fsig->params [0]->type == MONO_TYPE_I8)
5604 else if (fsig->params [0]->type == MONO_TYPE_U8)
5605 opcode = OP_LMIN_UN;
5606 } else if (strcmp (cmethod->name, "Max") == 0) {
5607 if (fsig->params [0]->type == MONO_TYPE_I4)
5609 if (fsig->params [0]->type == MONO_TYPE_U4)
5610 opcode = OP_IMAX_UN;
5611 else if (fsig->params [0]->type == MONO_TYPE_I8)
5613 else if (fsig->params [0]->type == MONO_TYPE_U8)
5614 opcode = OP_LMAX_UN;
5618 if (opcode && fsig->param_count == 2) {
5619 MONO_INST_NEW (cfg, ins, opcode);
5620 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5621 ins->dreg = mono_alloc_ireg (cfg);
5622 ins->sreg1 = args [0]->dreg;
5623 ins->sreg2 = args [1]->dreg;
5624 MONO_ADD_INS (cfg->cbb, ins);
5632 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5634 if (cmethod->klass == mono_defaults.array_class) {
5635 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5636 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5637 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5638 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5639 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5640 return emit_array_unsafe_mov (cfg, fsig, args);
5647 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5649 MonoInst *ins = NULL;
5651 static MonoClass *runtime_helpers_class = NULL;
5652 if (! runtime_helpers_class)
5653 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5654 "System.Runtime.CompilerServices", "RuntimeHelpers");
5656 if (cmethod->klass == mono_defaults.string_class) {
5657 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5658 int dreg = alloc_ireg (cfg);
5659 int index_reg = alloc_preg (cfg);
5660 int mult_reg = alloc_preg (cfg);
5661 int add_reg = alloc_preg (cfg);
5663 #if SIZEOF_REGISTER == 8
5664 /* The array reg is 64 bits but the index reg is only 32 */
5665 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5667 index_reg = args [1]->dreg;
5669 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5671 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5672 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5673 add_reg = ins->dreg;
5674 /* Avoid a warning */
5676 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5680 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5681 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5682 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5684 type_from_op (cfg, ins, NULL, NULL);
5686 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5687 int dreg = alloc_ireg (cfg);
5688 /* Decompose later to allow more optimizations */
5689 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5690 ins->type = STACK_I4;
5691 ins->flags |= MONO_INST_FAULT;
5692 cfg->cbb->has_array_access = TRUE;
5693 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5696 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5697 int mult_reg = alloc_preg (cfg);
5698 int add_reg = alloc_preg (cfg);
5700 /* The corlib functions check for oob already. */
5701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5702 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5704 return cfg->cbb->last_ins;
5707 } else if (cmethod->klass == mono_defaults.object_class) {
5709 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5710 int dreg = alloc_ireg_ref (cfg);
5711 int vt_reg = alloc_preg (cfg);
5712 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5713 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5714 type_from_op (cfg, ins, NULL, NULL);
5717 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5718 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5719 int dreg = alloc_ireg (cfg);
5720 int t1 = alloc_ireg (cfg);
5722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5723 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5724 ins->type = STACK_I4;
5728 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5729 MONO_INST_NEW (cfg, ins, OP_NOP);
5730 MONO_ADD_INS (cfg->cbb, ins);
5734 } else if (cmethod->klass == mono_defaults.array_class) {
5735 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5736 return emit_array_generic_access (cfg, fsig, args, FALSE);
5737 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5738 return emit_array_generic_access (cfg, fsig, args, TRUE);
5740 #ifndef MONO_BIG_ARRAYS
5742 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5745 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5746 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5747 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5748 int dreg = alloc_ireg (cfg);
5749 int bounds_reg = alloc_ireg_mp (cfg);
5750 MonoBasicBlock *end_bb, *szarray_bb;
5751 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5753 NEW_BBLOCK (cfg, end_bb);
5754 NEW_BBLOCK (cfg, szarray_bb);
5756 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5757 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5760 /* Non-szarray case */
5762 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5763 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5765 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5766 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5768 MONO_START_BB (cfg, szarray_bb);
5771 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5772 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5774 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5775 MONO_START_BB (cfg, end_bb);
5777 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5778 ins->type = STACK_I4;
5784 if (cmethod->name [0] != 'g')
5787 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5788 int dreg = alloc_ireg (cfg);
5789 int vtable_reg = alloc_preg (cfg);
5790 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5791 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5792 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5793 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5794 type_from_op (cfg, ins, NULL, NULL);
5797 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5798 int dreg = alloc_ireg (cfg);
5800 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5801 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5802 type_from_op (cfg, ins, NULL, NULL);
5807 } else if (cmethod->klass == runtime_helpers_class) {
5809 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5810 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5814 } else if (cmethod->klass == mono_defaults.thread_class) {
5815 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5816 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5817 MONO_ADD_INS (cfg->cbb, ins);
5819 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5820 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5821 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5823 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5825 if (fsig->params [0]->type == MONO_TYPE_I1)
5826 opcode = OP_LOADI1_MEMBASE;
5827 else if (fsig->params [0]->type == MONO_TYPE_U1)
5828 opcode = OP_LOADU1_MEMBASE;
5829 else if (fsig->params [0]->type == MONO_TYPE_I2)
5830 opcode = OP_LOADI2_MEMBASE;
5831 else if (fsig->params [0]->type == MONO_TYPE_U2)
5832 opcode = OP_LOADU2_MEMBASE;
5833 else if (fsig->params [0]->type == MONO_TYPE_I4)
5834 opcode = OP_LOADI4_MEMBASE;
5835 else if (fsig->params [0]->type == MONO_TYPE_U4)
5836 opcode = OP_LOADU4_MEMBASE;
5837 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5838 opcode = OP_LOADI8_MEMBASE;
5839 else if (fsig->params [0]->type == MONO_TYPE_R4)
5840 opcode = OP_LOADR4_MEMBASE;
5841 else if (fsig->params [0]->type == MONO_TYPE_R8)
5842 opcode = OP_LOADR8_MEMBASE;
5843 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5844 opcode = OP_LOAD_MEMBASE;
5847 MONO_INST_NEW (cfg, ins, opcode);
5848 ins->inst_basereg = args [0]->dreg;
5849 ins->inst_offset = 0;
5850 MONO_ADD_INS (cfg->cbb, ins);
5852 switch (fsig->params [0]->type) {
5859 ins->dreg = mono_alloc_ireg (cfg);
5860 ins->type = STACK_I4;
5864 ins->dreg = mono_alloc_lreg (cfg);
5865 ins->type = STACK_I8;
5869 ins->dreg = mono_alloc_ireg (cfg);
5870 #if SIZEOF_REGISTER == 8
5871 ins->type = STACK_I8;
5873 ins->type = STACK_I4;
5878 ins->dreg = mono_alloc_freg (cfg);
5879 ins->type = STACK_R8;
5882 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5883 ins->dreg = mono_alloc_ireg_ref (cfg);
5884 ins->type = STACK_OBJ;
5888 if (opcode == OP_LOADI8_MEMBASE)
5889 ins = mono_decompose_opcode (cfg, ins, NULL);
5891 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5895 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5897 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5899 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5900 opcode = OP_STOREI1_MEMBASE_REG;
5901 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5902 opcode = OP_STOREI2_MEMBASE_REG;
5903 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5904 opcode = OP_STOREI4_MEMBASE_REG;
5905 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5906 opcode = OP_STOREI8_MEMBASE_REG;
5907 else if (fsig->params [0]->type == MONO_TYPE_R4)
5908 opcode = OP_STORER4_MEMBASE_REG;
5909 else if (fsig->params [0]->type == MONO_TYPE_R8)
5910 opcode = OP_STORER8_MEMBASE_REG;
5911 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5912 opcode = OP_STORE_MEMBASE_REG;
5915 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5917 MONO_INST_NEW (cfg, ins, opcode);
5918 ins->sreg1 = args [1]->dreg;
5919 ins->inst_destbasereg = args [0]->dreg;
5920 ins->inst_offset = 0;
5921 MONO_ADD_INS (cfg->cbb, ins);
5923 if (opcode == OP_STOREI8_MEMBASE_REG)
5924 ins = mono_decompose_opcode (cfg, ins, NULL);
5929 } else if (cmethod->klass == mono_defaults.monitor_class) {
5930 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5931 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5934 if (COMPILE_LLVM (cfg)) {
5936 * Pass the argument normally, the LLVM backend will handle the
5937 * calling convention problems.
5939 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5941 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5942 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5943 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5944 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5947 return (MonoInst*)call;
5948 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5949 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5952 if (COMPILE_LLVM (cfg)) {
5954 * Pass the argument normally, the LLVM backend will handle the
5955 * calling convention problems.
5957 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5959 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5960 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5961 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5962 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5965 return (MonoInst*)call;
5967 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5970 if (COMPILE_LLVM (cfg)) {
5971 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5973 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5974 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5975 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5976 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5979 return (MonoInst*)call;
5982 } else if (cmethod->klass->image == mono_defaults.corlib &&
5983 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5984 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5987 #if SIZEOF_REGISTER == 8
5988 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5989 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5990 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5991 ins->dreg = mono_alloc_preg (cfg);
5992 ins->sreg1 = args [0]->dreg;
5993 ins->type = STACK_I8;
5994 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5995 MONO_ADD_INS (cfg->cbb, ins);
5999 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6001 /* 64 bit reads are already atomic */
6002 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6003 load_ins->dreg = mono_alloc_preg (cfg);
6004 load_ins->inst_basereg = args [0]->dreg;
6005 load_ins->inst_offset = 0;
6006 load_ins->type = STACK_I8;
6007 MONO_ADD_INS (cfg->cbb, load_ins);
6009 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6016 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6017 MonoInst *ins_iconst;
6020 if (fsig->params [0]->type == MONO_TYPE_I4) {
6021 opcode = OP_ATOMIC_ADD_I4;
6022 cfg->has_atomic_add_i4 = TRUE;
6024 #if SIZEOF_REGISTER == 8
6025 else if (fsig->params [0]->type == MONO_TYPE_I8)
6026 opcode = OP_ATOMIC_ADD_I8;
6029 if (!mono_arch_opcode_supported (opcode))
6031 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6032 ins_iconst->inst_c0 = 1;
6033 ins_iconst->dreg = mono_alloc_ireg (cfg);
6034 MONO_ADD_INS (cfg->cbb, ins_iconst);
6036 MONO_INST_NEW (cfg, ins, opcode);
6037 ins->dreg = mono_alloc_ireg (cfg);
6038 ins->inst_basereg = args [0]->dreg;
6039 ins->inst_offset = 0;
6040 ins->sreg2 = ins_iconst->dreg;
6041 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6042 MONO_ADD_INS (cfg->cbb, ins);
6044 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6045 MonoInst *ins_iconst;
6048 if (fsig->params [0]->type == MONO_TYPE_I4) {
6049 opcode = OP_ATOMIC_ADD_I4;
6050 cfg->has_atomic_add_i4 = TRUE;
6052 #if SIZEOF_REGISTER == 8
6053 else if (fsig->params [0]->type == MONO_TYPE_I8)
6054 opcode = OP_ATOMIC_ADD_I8;
6057 if (!mono_arch_opcode_supported (opcode))
6059 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6060 ins_iconst->inst_c0 = -1;
6061 ins_iconst->dreg = mono_alloc_ireg (cfg);
6062 MONO_ADD_INS (cfg->cbb, ins_iconst);
6064 MONO_INST_NEW (cfg, ins, opcode);
6065 ins->dreg = mono_alloc_ireg (cfg);
6066 ins->inst_basereg = args [0]->dreg;
6067 ins->inst_offset = 0;
6068 ins->sreg2 = ins_iconst->dreg;
6069 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6070 MONO_ADD_INS (cfg->cbb, ins);
6072 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6075 if (fsig->params [0]->type == MONO_TYPE_I4) {
6076 opcode = OP_ATOMIC_ADD_I4;
6077 cfg->has_atomic_add_i4 = TRUE;
6079 #if SIZEOF_REGISTER == 8
6080 else if (fsig->params [0]->type == MONO_TYPE_I8)
6081 opcode = OP_ATOMIC_ADD_I8;
6084 if (!mono_arch_opcode_supported (opcode))
6086 MONO_INST_NEW (cfg, ins, opcode);
6087 ins->dreg = mono_alloc_ireg (cfg);
6088 ins->inst_basereg = args [0]->dreg;
6089 ins->inst_offset = 0;
6090 ins->sreg2 = args [1]->dreg;
6091 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6092 MONO_ADD_INS (cfg->cbb, ins);
6095 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6096 MonoInst *f2i = NULL, *i2f;
6097 guint32 opcode, f2i_opcode, i2f_opcode;
6098 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6099 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6101 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6102 fsig->params [0]->type == MONO_TYPE_R4) {
6103 opcode = OP_ATOMIC_EXCHANGE_I4;
6104 f2i_opcode = OP_MOVE_F_TO_I4;
6105 i2f_opcode = OP_MOVE_I4_TO_F;
6106 cfg->has_atomic_exchange_i4 = TRUE;
6108 #if SIZEOF_REGISTER == 8
6110 fsig->params [0]->type == MONO_TYPE_I8 ||
6111 fsig->params [0]->type == MONO_TYPE_R8 ||
6112 fsig->params [0]->type == MONO_TYPE_I) {
6113 opcode = OP_ATOMIC_EXCHANGE_I8;
6114 f2i_opcode = OP_MOVE_F_TO_I8;
6115 i2f_opcode = OP_MOVE_I8_TO_F;
6118 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6119 opcode = OP_ATOMIC_EXCHANGE_I4;
6120 cfg->has_atomic_exchange_i4 = TRUE;
6126 if (!mono_arch_opcode_supported (opcode))
6130 /* TODO: Decompose these opcodes instead of bailing here. */
6131 if (COMPILE_SOFT_FLOAT (cfg))
6134 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6135 f2i->dreg = mono_alloc_ireg (cfg);
6136 f2i->sreg1 = args [1]->dreg;
6137 if (f2i_opcode == OP_MOVE_F_TO_I4)
6138 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6139 MONO_ADD_INS (cfg->cbb, f2i);
6142 MONO_INST_NEW (cfg, ins, opcode);
6143 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6144 ins->inst_basereg = args [0]->dreg;
6145 ins->inst_offset = 0;
6146 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6147 MONO_ADD_INS (cfg->cbb, ins);
6149 switch (fsig->params [0]->type) {
6151 ins->type = STACK_I4;
6154 ins->type = STACK_I8;
6157 #if SIZEOF_REGISTER == 8
6158 ins->type = STACK_I8;
6160 ins->type = STACK_I4;
6165 ins->type = STACK_R8;
6168 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6169 ins->type = STACK_OBJ;
6174 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6175 i2f->dreg = mono_alloc_freg (cfg);
6176 i2f->sreg1 = ins->dreg;
6177 i2f->type = STACK_R8;
6178 if (i2f_opcode == OP_MOVE_I4_TO_F)
6179 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6180 MONO_ADD_INS (cfg->cbb, i2f);
6185 if (cfg->gen_write_barriers && is_ref)
6186 emit_write_barrier (cfg, args [0], args [1]);
6188 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6189 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6190 guint32 opcode, f2i_opcode, i2f_opcode;
6191 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6192 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6194 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6195 fsig->params [1]->type == MONO_TYPE_R4) {
6196 opcode = OP_ATOMIC_CAS_I4;
6197 f2i_opcode = OP_MOVE_F_TO_I4;
6198 i2f_opcode = OP_MOVE_I4_TO_F;
6199 cfg->has_atomic_cas_i4 = TRUE;
6201 #if SIZEOF_REGISTER == 8
6203 fsig->params [1]->type == MONO_TYPE_I8 ||
6204 fsig->params [1]->type == MONO_TYPE_R8 ||
6205 fsig->params [1]->type == MONO_TYPE_I) {
6206 opcode = OP_ATOMIC_CAS_I8;
6207 f2i_opcode = OP_MOVE_F_TO_I8;
6208 i2f_opcode = OP_MOVE_I8_TO_F;
6211 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6212 opcode = OP_ATOMIC_CAS_I4;
6213 cfg->has_atomic_cas_i4 = TRUE;
6219 if (!mono_arch_opcode_supported (opcode))
6223 /* TODO: Decompose these opcodes instead of bailing here. */
6224 if (COMPILE_SOFT_FLOAT (cfg))
6227 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6228 f2i_new->dreg = mono_alloc_ireg (cfg);
6229 f2i_new->sreg1 = args [1]->dreg;
6230 if (f2i_opcode == OP_MOVE_F_TO_I4)
6231 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6232 MONO_ADD_INS (cfg->cbb, f2i_new);
6234 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6235 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6236 f2i_cmp->sreg1 = args [2]->dreg;
6237 if (f2i_opcode == OP_MOVE_F_TO_I4)
6238 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6239 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6242 MONO_INST_NEW (cfg, ins, opcode);
6243 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6244 ins->sreg1 = args [0]->dreg;
6245 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6246 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6247 MONO_ADD_INS (cfg->cbb, ins);
6249 switch (fsig->params [1]->type) {
6251 ins->type = STACK_I4;
6254 ins->type = STACK_I8;
6257 #if SIZEOF_REGISTER == 8
6258 ins->type = STACK_I8;
6260 ins->type = STACK_I4;
6265 ins->type = STACK_R8;
6268 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6269 ins->type = STACK_OBJ;
6274 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6275 i2f->dreg = mono_alloc_freg (cfg);
6276 i2f->sreg1 = ins->dreg;
6277 i2f->type = STACK_R8;
6278 if (i2f_opcode == OP_MOVE_I4_TO_F)
6279 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6280 MONO_ADD_INS (cfg->cbb, i2f);
6285 if (cfg->gen_write_barriers && is_ref)
6286 emit_write_barrier (cfg, args [0], args [1]);
6288 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6289 fsig->params [1]->type == MONO_TYPE_I4) {
6290 MonoInst *cmp, *ceq;
6292 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6295 /* int32 r = CAS (location, value, comparand); */
6296 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6297 ins->dreg = alloc_ireg (cfg);
6298 ins->sreg1 = args [0]->dreg;
6299 ins->sreg2 = args [1]->dreg;
6300 ins->sreg3 = args [2]->dreg;
6301 ins->type = STACK_I4;
6302 MONO_ADD_INS (cfg->cbb, ins);
6304 /* bool result = r == comparand; */
6305 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6306 cmp->sreg1 = ins->dreg;
6307 cmp->sreg2 = args [2]->dreg;
6308 cmp->type = STACK_I4;
6309 MONO_ADD_INS (cfg->cbb, cmp);
6311 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6312 ceq->dreg = alloc_ireg (cfg);
6313 ceq->type = STACK_I4;
6314 MONO_ADD_INS (cfg->cbb, ceq);
6316 /* *success = result; */
6317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6319 cfg->has_atomic_cas_i4 = TRUE;
6321 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6322 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6326 } else if (cmethod->klass->image == mono_defaults.corlib &&
6327 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6328 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6331 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6333 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6334 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6336 if (fsig->params [0]->type == MONO_TYPE_I1)
6337 opcode = OP_ATOMIC_LOAD_I1;
6338 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6339 opcode = OP_ATOMIC_LOAD_U1;
6340 else if (fsig->params [0]->type == MONO_TYPE_I2)
6341 opcode = OP_ATOMIC_LOAD_I2;
6342 else if (fsig->params [0]->type == MONO_TYPE_U2)
6343 opcode = OP_ATOMIC_LOAD_U2;
6344 else if (fsig->params [0]->type == MONO_TYPE_I4)
6345 opcode = OP_ATOMIC_LOAD_I4;
6346 else if (fsig->params [0]->type == MONO_TYPE_U4)
6347 opcode = OP_ATOMIC_LOAD_U4;
6348 else if (fsig->params [0]->type == MONO_TYPE_R4)
6349 opcode = OP_ATOMIC_LOAD_R4;
6350 else if (fsig->params [0]->type == MONO_TYPE_R8)
6351 opcode = OP_ATOMIC_LOAD_R8;
6352 #if SIZEOF_REGISTER == 8
6353 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6354 opcode = OP_ATOMIC_LOAD_I8;
6355 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6356 opcode = OP_ATOMIC_LOAD_U8;
6358 else if (fsig->params [0]->type == MONO_TYPE_I)
6359 opcode = OP_ATOMIC_LOAD_I4;
6360 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6361 opcode = OP_ATOMIC_LOAD_U4;
6365 if (!mono_arch_opcode_supported (opcode))
6368 MONO_INST_NEW (cfg, ins, opcode);
6369 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6370 ins->sreg1 = args [0]->dreg;
6371 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6372 MONO_ADD_INS (cfg->cbb, ins);
6374 switch (fsig->params [0]->type) {
6375 case MONO_TYPE_BOOLEAN:
6382 ins->type = STACK_I4;
6386 ins->type = STACK_I8;
6390 #if SIZEOF_REGISTER == 8
6391 ins->type = STACK_I8;
6393 ins->type = STACK_I4;
6398 ins->type = STACK_R8;
6401 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6402 ins->type = STACK_OBJ;
6408 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6410 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6412 if (fsig->params [0]->type == MONO_TYPE_I1)
6413 opcode = OP_ATOMIC_STORE_I1;
6414 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6415 opcode = OP_ATOMIC_STORE_U1;
6416 else if (fsig->params [0]->type == MONO_TYPE_I2)
6417 opcode = OP_ATOMIC_STORE_I2;
6418 else if (fsig->params [0]->type == MONO_TYPE_U2)
6419 opcode = OP_ATOMIC_STORE_U2;
6420 else if (fsig->params [0]->type == MONO_TYPE_I4)
6421 opcode = OP_ATOMIC_STORE_I4;
6422 else if (fsig->params [0]->type == MONO_TYPE_U4)
6423 opcode = OP_ATOMIC_STORE_U4;
6424 else if (fsig->params [0]->type == MONO_TYPE_R4)
6425 opcode = OP_ATOMIC_STORE_R4;
6426 else if (fsig->params [0]->type == MONO_TYPE_R8)
6427 opcode = OP_ATOMIC_STORE_R8;
6428 #if SIZEOF_REGISTER == 8
6429 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6430 opcode = OP_ATOMIC_STORE_I8;
6431 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6432 opcode = OP_ATOMIC_STORE_U8;
6434 else if (fsig->params [0]->type == MONO_TYPE_I)
6435 opcode = OP_ATOMIC_STORE_I4;
6436 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6437 opcode = OP_ATOMIC_STORE_U4;
6441 if (!mono_arch_opcode_supported (opcode))
6444 MONO_INST_NEW (cfg, ins, opcode);
6445 ins->dreg = args [0]->dreg;
6446 ins->sreg1 = args [1]->dreg;
6447 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6448 MONO_ADD_INS (cfg->cbb, ins);
6450 if (cfg->gen_write_barriers && is_ref)
6451 emit_write_barrier (cfg, args [0], args [1]);
6457 } else if (cmethod->klass->image == mono_defaults.corlib &&
6458 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6459 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6460 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6461 if (should_insert_brekpoint (cfg->method)) {
6462 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6464 MONO_INST_NEW (cfg, ins, OP_NOP);
6465 MONO_ADD_INS (cfg->cbb, ins);
6469 } else if (cmethod->klass->image == mono_defaults.corlib &&
6470 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6471 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6472 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6474 EMIT_NEW_ICONST (cfg, ins, 1);
6476 EMIT_NEW_ICONST (cfg, ins, 0);
6479 } else if (cmethod->klass == mono_defaults.math_class) {
6481 * There is general branchless code for Min/Max, but it does not work for
6483 * http://everything2.com/?node_id=1051618
6485 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6486 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6487 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6488 !strcmp (cmethod->klass->name, "Selector")) {
6489 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6490 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6491 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6494 MonoJumpInfoToken *ji;
6497 cfg->disable_llvm = TRUE;
6499 if (args [0]->opcode == OP_GOT_ENTRY) {
6500 pi = args [0]->inst_p1;
6501 g_assert (pi->opcode == OP_PATCH_INFO);
6502 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6505 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6506 ji = args [0]->inst_p0;
6509 NULLIFY_INS (args [0]);
6512 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6513 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6514 ins->dreg = mono_alloc_ireg (cfg);
6516 ins->inst_p0 = mono_string_to_utf8 (s);
6517 MONO_ADD_INS (cfg->cbb, ins);
6523 #ifdef MONO_ARCH_SIMD_INTRINSICS
6524 if (cfg->opt & MONO_OPT_SIMD) {
6525 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6531 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6535 if (COMPILE_LLVM (cfg)) {
6536 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6541 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6545 * This entry point could be used later for arbitrary method
6548 inline static MonoInst*
6549 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6550 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6552 if (method->klass == mono_defaults.string_class) {
6553 /* managed string allocation support */
6554 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6555 MonoInst *iargs [2];
6556 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6557 MonoMethod *managed_alloc = NULL;
6559 g_assert (vtable); /*Should not fail since it System.String*/
6560 #ifndef MONO_CROSS_COMPILE
6561 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6565 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6566 iargs [1] = args [0];
6567 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create a local variable for each argument of SIG (including the implicit
 * 'this'), publish it as cfg->args [i], and emit a store of the incoming
 * value SP [i] into it, so an inlined body can use the args like normal ones.
 */
6574 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6576 MonoInst *store, *temp;
6579 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The implicit 'this' (i == 0 when hasthis) has no entry in sig->params,
 * so its type is derived from the value on the stack instead. */
6580 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6583 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6584 * would be different than the MonoInst's used to represent arguments, and
6585 * the ldelema implementation can't deal with that.
6586 * Solution: When ldelema is used on an inline argument, create a var for
6587 * it, emit ldelema on that var, and emit the saving code below in
6588 * inline_method () if needed.
6590 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6591 cfg->args [i] = temp;
6592 /* This uses cfg->args [i] which is set by the preceding line */
6593 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6594 store->cil_code = sp [0]->cil_code;
6599 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6600 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6602 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only permit inlining of callees whose full name starts with
 * the value of the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.  The limit
 * string is read once and cached in a function-local static.
 */
6604 check_inline_called_method_name_limit (MonoMethod *called_method)
6607 static const char *limit = NULL;
6609 if (limit == NULL) {
6610 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6612 if (limit_string != NULL)
6613 limit = limit_string;
/* Only apply the prefix check when a non-empty limit is configured. */
6618 if (limit [0] != '\0') {
6619 char *called_method_name = mono_method_full_name (called_method, TRUE);
6621 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6622 g_free (called_method_name);
6624 //return (strncmp_result <= 0);
6625 return (strncmp_result == 0);
6632 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid: only permit inlining when the *caller's* full name starts
 * with MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Mirrors
 * check_inline_called_method_name_limit () above.
 */
6634 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6637 static const char *limit = NULL;
6639 if (limit == NULL) {
6640 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6641 if (limit_string != NULL) {
6642 limit = limit_string;
/* Only apply the prefix check when a non-empty limit is configured. */
6648 if (limit [0] != '\0') {
6649 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6651 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6652 g_free (caller_method_name);
6654 //return (strncmp_result <= 0);
6655 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which zero-initializes vreg DREG according to RTYPE:
 * NULL for reference/pointer types, 0 for integer types, 0.0 for floats and
 * VZERO for value types (including generic params known to be value types).
 */
6663 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants so OP_R4CONST/OP_R8CONST can point at stable storage. */
6665 static double r8_0 = 0.0;
6666 static float r4_0 = 0.0;
6670 rtype = mini_get_underlying_type (cfg, rtype);
6674 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6675 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6676 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6677 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6678 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* r4fp mode keeps R4 values in single precision (STACK_R4) instead of
 * widening them to R8. */
6679 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6680 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6681 ins->type = STACK_R4;
6682 ins->inst_p0 = (void*)&r4_0;
6684 MONO_ADD_INS (cfg->cbb, ins);
6685 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6686 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6687 ins->type = STACK_R8;
6688 ins->inst_p0 = (void*)&r8_0;
6690 MONO_ADD_INS (cfg->cbb, ins);
6691 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6692 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6693 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type parameters known to be value types also get VZERO. */
6694 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6695 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6697 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initializations for DREG: they keep the IR valid for the
 * later passes without generating real initialization code.  Falls back to a
 * real emit_init_rvar () for types which have no dummy opcode.
 */
6702 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6706 rtype = mini_get_underlying_type (cfg, rtype);
6710 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6711 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6712 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6713 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6714 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
/* Type dispatch mirrors emit_init_rvar () above, one dummy opcode per case. */
6715 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6716 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6717 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6718 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6719 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6720 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6721 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6722 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6723 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
6725 emit_init_rvar (cfg, dreg, rtype);
6729 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6731 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6733 MonoInst *var = cfg->locals [local];
/* Soft-float path: initialize a fresh vreg, then store it into the local
 * with an explicit LOCSTORE of the last emitted instruction. */
6734 if (COMPILE_SOFT_FLOAT (cfg)) {
6736 int reg = alloc_dreg (cfg, var->type);
6737 emit_init_rvar (cfg, reg, type);
6738 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
/* Real initialization when INIT, dummy (IR-validity only) otherwise. */
6741 emit_init_rvar (cfg, var->dreg, type);
6743 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to compile CMETHOD's body in place of the current call site.  Saves
 * every cfg field that mono_method_to_ir () overwrites, compiles the callee
 * between freshly allocated SBBLOCK/EBBLOCK blocks, restores cfg, then either
 * splices the new blocks into the graph (cost acceptable or INLINE_ALWAYS)
 * or discards them by resetting cfg->cbb.
 */
6750 * Return the cost of inlining CMETHOD.
6753 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6754 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6756 MonoInst *ins, *rvar = NULL;
6757 MonoMethodHeader *cheader;
6758 MonoBasicBlock *ebblock, *sbblock;
6760 MonoMethod *prev_inlined_method;
6761 MonoInst **prev_locals, **prev_args;
6762 MonoType **prev_arg_types;
6763 guint prev_real_offset;
6764 GHashTable *prev_cbb_hash;
6765 MonoBasicBlock **prev_cil_offset_to_bb;
6766 MonoBasicBlock *prev_cbb;
6767 unsigned char* prev_cil_start;
6768 guint32 prev_cil_offset_to_bb_len;
6769 MonoMethod *prev_current_method;
6770 MonoGenericContext *prev_generic_context;
6771 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6773 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging which inlines happen. */
6775 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6776 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6779 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6780 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6785 fsig = mono_method_signature (cmethod);
6787 if (cfg->verbose_level > 2)
6788 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6790 if (!cmethod->inline_info) {
6791 cfg->stat_inlineable_methods++;
6792 cmethod->inline_info = 1;
6795 /* allocate local variables */
6796 cheader = mono_method_get_header (cmethod);
6798 if (cheader == NULL || mono_loader_get_last_error ()) {
6799 MonoLoaderError *error = mono_loader_get_last_error ();
6802 mono_metadata_free_mh (cheader);
6803 if (inline_always && error)
6804 mono_cfg_set_exception (cfg, error->exception_type);
6806 mono_loader_clear_error ();
6810 /*Must verify before creating locals as it can cause the JIT to assert.*/
6811 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6812 mono_metadata_free_mh (cheader);
6816 /* allocate space to store the return value */
6817 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6818 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6821 prev_locals = cfg->locals;
6822 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6823 for (i = 0; i < cheader->num_locals; ++i)
6824 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6826 /* allocate start and end blocks */
6827 /* This is needed so if the inline is aborted, we can clean up */
6828 NEW_BBLOCK (cfg, sbblock);
6829 sbblock->real_offset = real_offset;
6831 NEW_BBLOCK (cfg, ebblock);
6832 ebblock->block_num = cfg->num_bblocks++;
6833 ebblock->real_offset = real_offset;
/* Save all cfg state that mono_method_to_ir () will clobber. */
6835 prev_args = cfg->args;
6836 prev_arg_types = cfg->arg_types;
6837 prev_inlined_method = cfg->inlined_method;
6838 cfg->inlined_method = cmethod;
6839 cfg->ret_var_set = FALSE;
6840 cfg->inline_depth ++;
6841 prev_real_offset = cfg->real_offset;
6842 prev_cbb_hash = cfg->cbb_hash;
6843 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6844 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6845 prev_cil_start = cfg->cil_start;
6846 prev_cbb = cfg->cbb;
6847 prev_current_method = cfg->current_method;
6848 prev_generic_context = cfg->generic_context;
6849 prev_ret_var_set = cfg->ret_var_set;
6850 prev_disable_inline = cfg->disable_inline;
/* A callvirt to a non-static method is compiled as a virtual call. */
6852 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6855 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6857 ret_var_set = cfg->ret_var_set;
/* Restore the saved cfg state. */
6859 cfg->inlined_method = prev_inlined_method;
6860 cfg->real_offset = prev_real_offset;
6861 cfg->cbb_hash = prev_cbb_hash;
6862 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6863 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6864 cfg->cil_start = prev_cil_start;
6865 cfg->locals = prev_locals;
6866 cfg->args = prev_args;
6867 cfg->arg_types = prev_arg_types;
6868 cfg->current_method = prev_current_method;
6869 cfg->generic_context = prev_generic_context;
6870 cfg->ret_var_set = prev_ret_var_set;
6871 cfg->disable_inline = prev_disable_inline;
6872 cfg->inline_depth --;
/* Keep the inline only when its cost is below the threshold or forced. */
6874 if ((costs >= 0 && costs < 60) || inline_always) {
6875 if (cfg->verbose_level > 2)
6876 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6878 cfg->stat_inlined_methods++;
6880 /* always add some code to avoid block split failures */
6881 MONO_INST_NEW (cfg, ins, OP_NOP);
6882 MONO_ADD_INS (prev_cbb, ins);
6884 prev_cbb->next_bb = sbblock;
6885 link_bblock (cfg, prev_cbb, sbblock);
6888 * Get rid of the begin and end bblocks if possible to aid local
6891 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6893 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6894 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6896 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6897 MonoBasicBlock *prev = ebblock->in_bb [0];
6898 mono_merge_basic_blocks (cfg, prev, ebblock);
6900 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6901 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6902 cfg->cbb = prev_cbb;
6906 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths which end in OP_NOT_REACHED. */
6912 for (i = 0; i < ebblock->in_count; ++i) {
6913 bb = ebblock->in_bb [i];
6915 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6918 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6927 *out_cbb = cfg->cbb;
6931 * If the inlined method contains only a throw, then the ret var is not
6932 * set, so set it to a dummy value.
6935 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6937 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6940 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6943 if (cfg->verbose_level > 2)
6944 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6945 cfg->exception_type = MONO_EXCEPTION_NONE;
6946 mono_loader_clear_error ();
6948 /* This gets rid of the newly added bblocks */
6949 cfg->cbb = prev_cbb;
6951 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6956 * Some of these comments may well be out-of-date.
6957 * Design decisions: we do a single pass over the IL code (and we do bblock
6958 * splitting/merging in the few cases when it's required: a back jump to an IL
6959 * address that was not already seen as bblock starting point).
6960 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6961 * Complex operations are decomposed in simpler ones right away. We need to let the
6962 * arch-specific code peek and poke inside this process somehow (except when the
6963 * optimizations can take advantage of the full semantic info of coarse opcodes).
6964 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6965 * MonoInst->opcode initially is the IL opcode or some simplification of that
6966 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6967 * opcode with value bigger than OP_LAST.
6968 * At this point the IR can be handed over to an interpreter, a dumb code generator
6969 * or to the optimizing code generator that will translate it to SSA form.
6971 * Profiling directed optimizations.
6972 * We may compile by default with few or no optimizations and instrument the code
6973 * or the user may indicate what methods to optimize the most either in a config file
6974 * or through repeated runs where the compiler applies offline the optimizations to
6975 * each method and then decides if it was worth it.
6978 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6979 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6980 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6981 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6982 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6983 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6984 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6985 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6987 /* offset from br.s -> br like opcodes */
6988 #define BIG_BRANCH_OFFSET 13
/* Return TRUE if IP still belongs to BB, i.e. no other bblock starts at IP. */
6991 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6993 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6995 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): create a basic block (via
 * GET_BBLOCK) at every branch/switch target and at the instruction following
 * each branch, and mark blocks ending in CEE_THROW as out-of-line code.
 */
6999 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7001 unsigned char *ip = start;
7002 unsigned char *target;
7005 MonoBasicBlock *bblock;
7006 const MonoOpcode *opcode;
7009 cli_addr = ip - start;
7010 i = mono_opcode_value ((const guint8 **)&ip, end);
7013 opcode = &mono_opcodes [i];
/* Advance over the opcode's inline operand; only branch-like operands
 * create new basic blocks. */
7014 switch (opcode->argument) {
7015 case MonoInlineNone:
7018 case MonoInlineString:
7019 case MonoInlineType:
7020 case MonoInlineField:
7021 case MonoInlineMethod:
7024 case MonoShortInlineR:
7031 case MonoShortInlineVar:
7032 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
7035 case MonoShortInlineBrTarget:
7036 target = start + cli_addr + 2 + (signed char)ip [1];
7037 GET_BBLOCK (cfg, bblock, target);
7040 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to the next instruction. */
7042 case MonoInlineBrTarget:
7043 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7044 GET_BBLOCK (cfg, bblock, target);
7047 GET_BBLOCK (cfg, bblock, ip);
7049 case MonoInlineSwitch: {
7050 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction
 * (opcode + count + n 4-byte entries), so advance cli_addr past it first. */
7053 cli_addr += 5 + 4 * n;
7054 target = start + cli_addr;
7055 GET_BBLOCK (cfg, bblock, target);
7057 for (j = 0; j < n; ++j) {
7058 target = start + cli_addr + (gint32)read32 (ip);
7059 GET_BBLOCK (cfg, bblock, target);
7069 g_assert_not_reached ();
7072 if (i == CEE_THROW) {
7073 unsigned char *bb_start = ip - 1;
7075 /* Find the start of the bblock containing the throw */
7077 while ((bb_start >= start) && !bblock) {
7078 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
7082 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the token indexes the
 * wrapper data (inflated with CONTEXT when given); otherwise it is looked up
 * in the metadata of M's image.  Methods on open constructed types are
 * allowed in the result (contrast with mini_get_method () below).
 */
7092 static inline MonoMethod *
7093 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7097 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7098 method = mono_method_get_wrapper_data (m, token);
7101 method = mono_class_inflate_generic_method_checked (method, context, &error);
7102 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7105 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when CFG is not compiling with a
 * generic sharing context, a method whose class is still an open constructed
 * type is rejected (it cannot be compiled directly).
 */
7111 static inline MonoMethod *
7112 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7114 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7116 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to an initialized MonoClass, going through the wrapper data
 * for wrapper methods and inflating with CONTEXT otherwise.
 */
7122 static inline MonoClass*
7123 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7128 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7129 klass = mono_method_get_wrapper_data (method, token);
7131 klass = mono_class_inflate_generic_class (klass, context);
7133 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7134 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Ensure the class is initialized before the JIT uses it. */
7137 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from the wrapper data (inflated
 * with CONTEXT) for wrappers, otherwise parsed from the image metadata.
 */
7141 static inline MonoMethodSignature*
7142 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7144 MonoMethodSignature *fsig;
7146 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7149 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7151 fsig = mono_inflate_generic_signature (fsig, context, &error);
7153 g_assert (mono_error_ok (&error));
7156 fsig = mono_metadata_parse_signature (method->klass->image, token);
7162 * Returns TRUE if the JIT should abort inlining because "callee"
7163 * is influenced by security attributes.
7166 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate declarative security when inlining (cfg->method != caller)
 * and the callee actually carries declsec metadata. */
7170 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7174 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7175 if (result == MONO_JIT_SECURITY_OK)
7178 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7179 /* Generate code to throw a SecurityException before the actual call/link */
7180 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7183 NEW_ICONST (cfg, args [0], 4);
7184 NEW_METHODCONST (cfg, args [1], caller);
7185 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7186 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7187 /* don't hide previous results */
7188 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7189 cfg->exception_data = result;
/* Lazily look up SecurityManager::ThrowException; cached in a static. */
7197 throw_exception (void)
7199 static MonoMethod *method = NULL;
7202 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7203 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* Emit a call to SecurityManager::ThrowException (EX) at the current point. */
7210 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7212 MonoMethod *thrower = throw_exception ();
7215 EMIT_NEW_PCONST (cfg, args [0], ex);
7216 mono_emit_method_call (cfg, thrower, args, NULL);
7220 * Return the original method if a wrapper is specified. We can only access
7221 * the custom attributes from the original method.
7224 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method. */
7226 if (method->wrapper_type == MONO_WRAPPER_NONE)
7229 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7230 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7233 /* in other cases we need to find the original method */
7234 return mono_marshal_method_from_wrapper (method);
/*
 * CoreCLR security: if CALLER may not access FIELD, emit code that throws
 * the returned exception at the access site instead of the field access.
 */
7238 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7239 MonoBasicBlock *bblock, unsigned char *ip)
7241 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7242 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7244 emit_throw_exception (cfg, ex);
/*
 * CoreCLR security: if CALLER may not call CALLEE, emit code that throws
 * the returned exception at the call site instead of the call.
 */
7248 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7249 MonoBasicBlock *bblock, unsigned char *ip)
7251 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7252 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7254 emit_throw_exception (cfg, ex);
7258 * Check that the IL instructions at ip are the array initialization
7259 * sequence and return the pointer to the data and the size.
7262 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7265 * newarr[System.Int32]
7267 * ldtoken field valuetype ...
7268 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match: dup; ldtoken <field-token> (table 0x04); call <token>. */
7270 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7272 guint32 token = read32 (ip + 7);
7273 guint32 field_token = read32 (ip + 2);
7274 guint32 field_index = field_token & 0xffffff;
7276 const char *data_ptr;
7278 MonoMethod *cmethod;
7279 MonoClass *dummy_class;
7280 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7284 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7288 *out_field_token = field_token;
7290 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers::InitializeArray qualifies. */
7293 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7295 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7296 case MONO_TYPE_BOOLEAN:
7300 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7301 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7302 case MONO_TYPE_CHAR:
/* The computed blob size must not exceed the field's declared size. */
7319 if (size > mono_type_size (field->type, &dummy_align))
7322 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7323 if (!image_is_dynamic (method->klass->image)) {
7324 field_index = read32 (ip + 2) & 0xffffff;
7325 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7326 data_ptr = mono_image_rva_map (method->klass->image, rva);
7327 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7328 /* for aot code we do the lookup on load */
7329 if (aot && data_ptr)
7330 return GUINT_TO_POINTER (rva);
7332 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the field data directly. */
7334 data_ptr = mono_field_get_data (field);
/*
 * Record an InvalidProgramException on CFG, with a message containing the
 * method name and a disassembly of the offending IL instruction at IP.
 */
7342 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7344 char *method_fname = mono_method_full_name (method, TRUE);
7346 MonoMethodHeader *header = mono_method_get_header (method);
7348 if (header->code_size == 0)
7349 method_code = g_strdup ("method body is empty.");
7351 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7352 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7353 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7354 g_free (method_fname);
7355 g_free (method_code);
/* The header is freed later, together with the rest of the compile. */
7356 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* Abort the compile with a pre-built exception object; the pointer is
 * registered as a GC root so the object survives until it is thrown. */
7360 set_exception_object (MonoCompile *cfg, MonoException *exception)
7362 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7363 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7364 cfg->exception_ptr = exception;
/*
 * Emit a store of *SP into local N, retargeting a just-emitted constant's
 * dreg directly to the local when possible instead of emitting a move.
 */
7368 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7371 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7372 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7373 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7374 /* Optimize reg-reg moves away */
7376 * Can't optimize other opcodes, since sp[0] might point to
7377 * the last ins of a decomposed opcode.
7379 sp [0]->dreg = (cfg)->locals [n]->dreg;
7381 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7386 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes "ldloca <n>; initobj <type>" and turns it into a direct local
 * initialization, so no address of the local ever needs to be taken. */
7389 static inline unsigned char *
7390 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7400 local = read16 (ip + 2);
/* Only safe when the following initobj is in the same basic block. */
7404 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7405 /* From the INITOBJ case */
7406 token = read32 (ip + 2);
7407 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7408 CHECK_TYPELOAD (klass);
7409 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7410 emit_init_local (cfg, local, type, TRUE);
/* Return whether CLASS is System.Exception or derives from it (walks ->parent). */
7418 is_exception_class (MonoClass *class)
7421 if (class == mono_defaults.exception_class)
7423 class = class->parent;
7429 * is_jit_optimizer_disabled:
7431 * Determine whether M's assembly has a DebuggableAttribute with the
7432 * IsJITOptimizerDisabled flag set.
7435 is_jit_optimizer_disabled (MonoMethod *m)
7437 MonoAssembly *ass = m->klass->image->assembly;
7438 MonoCustomAttrInfo* attrs;
7439 static MonoClass *klass;
7441 gboolean val = FALSE;
/* Result is computed once per assembly and cached on it. */
7444 if (ass->jit_optimizer_disabled_inited)
7445 return ass->jit_optimizer_disabled;
7448 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7451 ass->jit_optimizer_disabled = FALSE;
/* Publish the cached value before the inited flag so racing readers never
 * observe the flag without the value. */
7452 mono_memory_barrier ();
7453 ass->jit_optimizer_disabled_inited = TRUE;
7457 attrs = mono_custom_attrs_from_assembly (ass);
7459 for (i = 0; i < attrs->num_attrs; ++i) {
7460 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7463 MonoMethodSignature *sig;
7465 if (!attr->ctor || attr->ctor->klass != klass)
7467 /* Decode the attribute. See reflection.c */
7468 len = attr->data_size;
7469 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
7470 g_assert (read16 (p) == 0x0001);
7473 // FIXME: Support named parameters
7474 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is handled here. */
7475 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7477 /* Two boolean arguments */
7481 mono_custom_attrs_free (attrs);
7484 ass->jit_optimizer_disabled = val;
7485 mono_memory_barrier ();
7486 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * Decide whether a call from METHOD to CMETHOD can be compiled as a tail
 * call: start from the arch/signature check, then veto on anything that may
 * reference the current frame (byref/pointer args, valuetype 'this'),
 * pinvokes, LMF-saving methods, most wrappers and non-CEE_CALL opcodes.
 */
7492 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7494 gboolean supported_tail_call;
/* Arch with a dedicated tail-call opcode decides itself; otherwise require
 * identical signatures and a non-struct return. */
7497 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7498 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7500 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7503 for (i = 0; i < fsig->param_count; ++i) {
7504 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7505 /* These can point to the current method's stack */
7506 supported_tail_call = FALSE;
7508 if (fsig->hasthis && cmethod->klass->valuetype)
7509 /* this might point to the current method's stack */
7510 supported_tail_call = FALSE;
7511 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7512 supported_tail_call = FALSE;
7513 if (cfg->method->save_lmf)
7514 supported_tail_call = FALSE;
7515 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7516 supported_tail_call = FALSE;
7517 if (call_opcode != CEE_CALL)
7518 supported_tail_call = FALSE;
7520 /* Debugging support */
7522 if (supported_tail_call) {
7523 if (!mono_debug_count ())
7524 supported_tail_call = FALSE;
7528 return supported_tail_call;
7531 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7532 * it to the thread local value based on the tls_offset field. Every other kind of access to
7533 * the field causes an assert.
/* Return whether FIELD is corlib's ThreadLocal`1.tlsdata (the "magic" field). */
7536 is_magic_tls_access (MonoClassField *field)
7538 if (strcmp (field->name, "tlsdata"))
7540 if (strcmp (field->parent->name, "ThreadLocal`1"))
7542 return field->parent->image == mono_defaults.corlib;
7545 /* emits the code needed to access a managed tls var (like ThreadStatic)
7546 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7547 * pointer for the current thread.
7548 * Returns the MonoInst* representing the address of the tls var.
7551 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7554 int static_data_reg, array_reg, dreg;
7555 int offset2_reg, idx_reg;
7556 // inlined access to the tls data
7557 // idx = (offset >> 24) - 1;
7558 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7559 static_data_reg = alloc_ireg (cfg);
7560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* Top byte of the offset (minus one) selects the static_data chunk. */
7561 idx_reg = alloc_ireg (cfg);
7562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by the pointer size (shift by 3 on 64-bit, 2 on 32-bit). */
7564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7565 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7566 array_reg = alloc_ireg (cfg);
7567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Low 24 bits of the offset index into the selected chunk. */
7568 offset2_reg = alloc_ireg (cfg);
7569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7570 dreg = alloc_ireg (cfg);
7571 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7576 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7577 * this address is cached per-method in cached_tls_addr.
7580 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7582 MonoInst *load, *addr, *temp, *store, *thread_ins;
7583 MonoClassField *offset_field;
/* Fast path: the tls address was already computed earlier in this method. */
7585 if (*cached_tls_addr) {
7586 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7589 thread_ins = mono_get_thread_intrinsic (cfg);
7590 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load ThreadLocal<T>.tls_offset from the ThreadLocal instance. */
7592 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7594 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: call the icall to get the thread. */
7596 MonoMethod *thread_method;
7597 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7598 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7600 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7601 addr->klass = mono_class_from_mono_type (tls_field->type);
7602 addr->type = STACK_MP;
/* Cache the computed address in a temp so later accesses reuse it. */
7603 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7604 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7606 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7613 * Handle calls made to ctors from NEWOBJ opcodes.
7615 * REF_BBLOCK will point to the current bblock after the call.
7618 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7619 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7621 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7622 MonoBasicBlock *bblock = *ref_bblock;
/* Generic-shared valuetype ctors need an extra hidden argument: the method
 * rgctx for method-inflated ctors, otherwise the (rgctx or constant) vtable. */
7624 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7625 mono_method_is_generic_sharable (cmethod, TRUE)) {
7626 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7627 mono_class_vtable (cfg->domain, cmethod->klass);
7628 CHECK_TYPELOAD (cmethod->klass);
7630 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7631 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7634 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7635 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7637 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7639 CHECK_TYPELOAD (cmethod->klass);
7640 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7645 /* Avoid virtual calls to ctors if possible */
7646 if (mono_class_is_marshalbyref (cmethod->klass))
7647 callvirt_this_arg = sp [0];
/* Preferred order: intrinsic ctor, inlined ctor, then a direct/indirect call. */
7649 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7650 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7651 CHECK_CFG_EXCEPTION;
/* Exception-class ctors are never inlined. */
7652 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7653 mono_method_check_inlining (cfg, cmethod) &&
7654 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7657 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7658 cfg->real_offset += 5;
7660 *inline_costs += costs - 5;
7661 *ref_bblock = bblock;
7663 INLINE_FAILURE ("inline failure");
7664 // FIXME-VT: Clean this up
7665 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7666 GSHAREDVT_FAILURE(*ip);
7667 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the gsharedvt out trampoline. */
7669 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7672 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7673 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7674 } else if (context_used &&
7675 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7676 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7677 MonoInst *cmethod_addr;
7679 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7681 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7682 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7684 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: plain (non-inlined) ctor call. */
7686 INLINE_FAILURE ("ctor call");
7687 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7688 callvirt_this_arg, NULL, vtable_arg);
7695 * mono_method_to_ir:
7697 * Translate the .net IL into linear IR.
7700 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7701 MonoInst *return_var, MonoInst **inline_args,
7702 guint inline_offset, gboolean is_virtual_call)
7705 MonoInst *ins, **sp, **stack_start;
7706 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7707 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7708 MonoMethod *cmethod, *method_definition;
7709 MonoInst **arg_array;
7710 MonoMethodHeader *header;
7712 guint32 token, ins_flag;
7714 MonoClass *constrained_class = NULL;
7715 unsigned char *ip, *end, *target, *err_pos;
7716 MonoMethodSignature *sig;
7717 MonoGenericContext *generic_context = NULL;
7718 MonoGenericContainer *generic_container = NULL;
7719 MonoType **param_types;
7720 int i, n, start_new_bblock, dreg;
7721 int num_calls = 0, inline_costs = 0;
7722 int breakpoint_id = 0;
7724 MonoBoolean security, pinvoke;
7725 MonoSecurityManager* secman = NULL;
7726 MonoDeclSecurityActions actions;
7727 GSList *class_inits = NULL;
7728 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7730 gboolean init_locals, seq_points, skip_dead_blocks;
7731 gboolean sym_seq_points = FALSE;
7732 MonoInst *cached_tls_addr = NULL;
7733 MonoDebugMethodInfo *minfo;
7734 MonoBitSet *seq_point_locs = NULL;
7735 MonoBitSet *seq_point_set_locs = NULL;
7737 cfg->disable_inline = is_jit_optimizer_disabled (method);
7739 /* serialization and xdomain stuff may need access to private fields and methods */
7740 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7741 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7742 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7743 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7744 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7745 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7747 dont_verify |= mono_security_smcs_hack_enabled ();
7749 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7750 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7751 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7752 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7753 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7755 image = method->klass->image;
7756 header = mono_method_get_header (method);
7758 MonoLoaderError *error;
7760 if ((error = mono_loader_get_last_error ())) {
7761 mono_cfg_set_exception (cfg, error->exception_type);
7763 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7764 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7766 goto exception_exit;
7768 generic_container = mono_method_get_generic_container (method);
7769 sig = mono_method_signature (method);
7770 num_args = sig->hasthis + sig->param_count;
7771 ip = (unsigned char*)header->code;
7772 cfg->cil_start = ip;
7773 end = ip + header->code_size;
7774 cfg->stat_cil_code_size += header->code_size;
7776 seq_points = cfg->gen_seq_points && cfg->method == method;
7777 #ifdef PLATFORM_ANDROID
7778 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7781 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7782 /* We could hit a seq point before attaching to the JIT (#8338) */
7786 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7787 minfo = mono_debug_lookup_method (method);
7789 int i, n_il_offsets;
7793 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7794 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7795 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7796 sym_seq_points = TRUE;
7797 for (i = 0; i < n_il_offsets; ++i) {
7798 if (il_offsets [i] < header->code_size)
7799 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7801 g_free (il_offsets);
7802 g_free (line_numbers);
7803 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7804 /* Methods without line number info like auto-generated property accessors */
7805 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7806 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7807 sym_seq_points = TRUE;
7812 * Methods without init_locals set could cause asserts in various passes
7813 * (#497220). To work around this, we emit dummy initialization opcodes
7814 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7815 * on some platforms.
7817 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7818 init_locals = header->init_locals;
7822 method_definition = method;
7823 while (method_definition->is_inflated) {
7824 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7825 method_definition = imethod->declaring;
7828 /* SkipVerification is not allowed if core-clr is enabled */
7829 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7831 dont_verify_stloc = TRUE;
7834 if (sig->is_inflated)
7835 generic_context = mono_method_get_context (method);
7836 else if (generic_container)
7837 generic_context = &generic_container->context;
7838 cfg->generic_context = generic_context;
7840 if (!cfg->generic_sharing_context)
7841 g_assert (!sig->has_type_parameters);
7843 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7844 g_assert (method->is_inflated);
7845 g_assert (mono_method_get_context (method)->method_inst);
7847 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7848 g_assert (sig->generic_param_count);
7850 if (cfg->method == method) {
7851 cfg->real_offset = 0;
7853 cfg->real_offset = inline_offset;
7856 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7857 cfg->cil_offset_to_bb_len = header->code_size;
7859 cfg->current_method = method;
7861 if (cfg->verbose_level > 2)
7862 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7864 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7866 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7867 for (n = 0; n < sig->param_count; ++n)
7868 param_types [n + sig->hasthis] = sig->params [n];
7869 cfg->arg_types = param_types;
7871 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7872 if (cfg->method == method) {
7874 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7875 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7878 NEW_BBLOCK (cfg, start_bblock);
7879 cfg->bb_entry = start_bblock;
7880 start_bblock->cil_code = NULL;
7881 start_bblock->cil_length = 0;
7882 #if defined(__native_client_codegen__)
7883 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7884 ins->dreg = alloc_dreg (cfg, STACK_I4);
7885 MONO_ADD_INS (start_bblock, ins);
7889 NEW_BBLOCK (cfg, end_bblock);
7890 cfg->bb_exit = end_bblock;
7891 end_bblock->cil_code = NULL;
7892 end_bblock->cil_length = 0;
7893 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7894 g_assert (cfg->num_bblocks == 2);
7896 arg_array = cfg->args;
7898 if (header->num_clauses) {
7899 cfg->spvars = g_hash_table_new (NULL, NULL);
7900 cfg->exvars = g_hash_table_new (NULL, NULL);
7902 /* handle exception clauses */
7903 for (i = 0; i < header->num_clauses; ++i) {
7904 MonoBasicBlock *try_bb;
7905 MonoExceptionClause *clause = &header->clauses [i];
7906 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7907 try_bb->real_offset = clause->try_offset;
7908 try_bb->try_start = TRUE;
7909 try_bb->region = ((i + 1) << 8) | clause->flags;
7910 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7911 tblock->real_offset = clause->handler_offset;
7912 tblock->flags |= BB_EXCEPTION_HANDLER;
7915 * Linking the try block with the EH block hinders inlining as we won't be able to
7916 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7918 if (COMPILE_LLVM (cfg))
7919 link_bblock (cfg, try_bb, tblock);
7921 if (*(ip + clause->handler_offset) == CEE_POP)
7922 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7924 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7925 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7926 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7927 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7928 MONO_ADD_INS (tblock, ins);
7930 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7931 /* finally clauses already have a seq point */
7932 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7933 MONO_ADD_INS (tblock, ins);
7936 /* todo: is a fault block unsafe to optimize? */
7937 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7938 tblock->flags |= BB_EXCEPTION_UNSAFE;
7942 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7944 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7946 /* catch and filter blocks get the exception object on the stack */
7947 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7948 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7949 MonoInst *dummy_use;
7951 /* mostly like handle_stack_args (), but just sets the input args */
7952 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7953 tblock->in_scount = 1;
7954 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7955 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7958 * Add a dummy use for the exvar so its liveness info will be
7962 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7964 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7965 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7966 tblock->flags |= BB_EXCEPTION_HANDLER;
7967 tblock->real_offset = clause->data.filter_offset;
7968 tblock->in_scount = 1;
7969 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7970 /* The filter block shares the exvar with the handler block */
7971 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7972 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7973 MONO_ADD_INS (tblock, ins);
7977 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7978 clause->data.catch_class &&
7979 cfg->generic_sharing_context &&
7980 mono_class_check_context_used (clause->data.catch_class)) {
7982 * In shared generic code with catch
7983 * clauses containing type variables
7984 * the exception handling code has to
7985 * be able to get to the rgctx.
7986 * Therefore we have to make sure that
7987 * the vtable/mrgctx argument (for
7988 * static or generic methods) or the
7989 * "this" argument (for non-static
7990 * methods) are live.
7992 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7993 mini_method_get_context (method)->method_inst ||
7994 method->klass->valuetype) {
7995 mono_get_vtable_var (cfg);
7997 MonoInst *dummy_use;
7999 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8004 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8005 cfg->cbb = start_bblock;
8006 cfg->args = arg_array;
8007 mono_save_args (cfg, sig, inline_args);
8010 /* FIRST CODE BLOCK */
8011 NEW_BBLOCK (cfg, bblock);
8012 bblock->cil_code = ip;
8016 ADD_BBLOCK (cfg, bblock);
8018 if (cfg->method == method) {
8019 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8020 if (breakpoint_id) {
8021 MONO_INST_NEW (cfg, ins, OP_BREAK);
8022 MONO_ADD_INS (bblock, ins);
8026 if (mono_security_cas_enabled ())
8027 secman = mono_security_manager_get_methods ();
8029 security = (secman && mono_security_method_has_declsec (method));
8030 /* at this point having security doesn't mean we have any code to generate */
8031 if (security && (cfg->method == method)) {
8032 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8033 * And we do not want to enter the next section (with allocation) if we
8034 * have nothing to generate */
8035 security = mono_declsec_get_demands (method, &actions);
8038 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8039 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8041 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8042 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8043 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8045 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8046 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8050 mono_custom_attrs_free (custom);
8053 custom = mono_custom_attrs_from_class (wrapped->klass);
8054 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8058 mono_custom_attrs_free (custom);
8061 /* not a P/Invoke after all */
8066 /* we use a separate basic block for the initialization code */
8067 NEW_BBLOCK (cfg, init_localsbb);
8068 cfg->bb_init = init_localsbb;
8069 init_localsbb->real_offset = cfg->real_offset;
8070 start_bblock->next_bb = init_localsbb;
8071 init_localsbb->next_bb = bblock;
8072 link_bblock (cfg, start_bblock, init_localsbb);
8073 link_bblock (cfg, init_localsbb, bblock);
8075 cfg->cbb = init_localsbb;
8077 if (cfg->gsharedvt && cfg->method == method) {
8078 MonoGSharedVtMethodInfo *info;
8079 MonoInst *var, *locals_var;
8082 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8083 info->method = cfg->method;
8084 info->count_entries = 16;
8085 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8086 cfg->gsharedvt_info = info;
8088 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8089 /* prevent it from being register allocated */
8090 //var->flags |= MONO_INST_VOLATILE;
8091 cfg->gsharedvt_info_var = var;
8093 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8094 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8096 /* Allocate locals */
8097 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8098 /* prevent it from being register allocated */
8099 //locals_var->flags |= MONO_INST_VOLATILE;
8100 cfg->gsharedvt_locals_var = locals_var;
8102 dreg = alloc_ireg (cfg);
8103 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8105 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8106 ins->dreg = locals_var->dreg;
8108 MONO_ADD_INS (cfg->cbb, ins);
8109 cfg->gsharedvt_locals_var_ins = ins;
8111 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8114 ins->flags |= MONO_INST_INIT;
8118 /* at this point we know, if security is TRUE, that some code needs to be generated */
8119 if (security && (cfg->method == method)) {
8122 cfg->stat_cas_demand_generation++;
8124 if (actions.demand.blob) {
8125 /* Add code for SecurityAction.Demand */
8126 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8127 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8128 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8129 mono_emit_method_call (cfg, secman->demand, args, NULL);
8131 if (actions.noncasdemand.blob) {
8132 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8133 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8134 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8135 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8136 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8137 mono_emit_method_call (cfg, secman->demand, args, NULL);
8139 if (actions.demandchoice.blob) {
8140 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8141 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8142 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8143 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8144 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8148 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8150 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8153 if (mono_security_core_clr_enabled ()) {
8154 /* check if this is native code, e.g. an icall or a p/invoke */
8155 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8156 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8158 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8159 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8161 /* if this ia a native call then it can only be JITted from platform code */
8162 if ((icall || pinvk) && method->klass && method->klass->image) {
8163 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8164 MonoException *ex = icall ? mono_get_exception_security () :
8165 mono_get_exception_method_access ();
8166 emit_throw_exception (cfg, ex);
8173 CHECK_CFG_EXCEPTION;
8175 if (header->code_size == 0)
8178 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8183 if (cfg->method == method)
8184 mono_debug_init_method (cfg, bblock, breakpoint_id);
8186 for (n = 0; n < header->num_locals; ++n) {
8187 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8192 /* We force the vtable variable here for all shared methods
8193 for the possibility that they might show up in a stack
8194 trace where their exact instantiation is needed. */
8195 if (cfg->generic_sharing_context && method == cfg->method) {
8196 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8197 mini_method_get_context (method)->method_inst ||
8198 method->klass->valuetype) {
8199 mono_get_vtable_var (cfg);
8201 /* FIXME: Is there a better way to do this?
8202 We need the variable live for the duration
8203 of the whole method. */
8204 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8208 /* add a check for this != NULL to inlined methods */
8209 if (is_virtual_call) {
8212 NEW_ARGLOAD (cfg, arg_ins, 0);
8213 MONO_ADD_INS (cfg->cbb, arg_ins);
8214 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8217 skip_dead_blocks = !dont_verify;
8218 if (skip_dead_blocks) {
8219 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8224 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8225 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8228 start_new_bblock = 0;
8231 if (cfg->method == method)
8232 cfg->real_offset = ip - header->code;
8234 cfg->real_offset = inline_offset;
8239 if (start_new_bblock) {
8240 bblock->cil_length = ip - bblock->cil_code;
8241 if (start_new_bblock == 2) {
8242 g_assert (ip == tblock->cil_code);
8244 GET_BBLOCK (cfg, tblock, ip);
8246 bblock->next_bb = tblock;
8249 start_new_bblock = 0;
8250 for (i = 0; i < bblock->in_scount; ++i) {
8251 if (cfg->verbose_level > 3)
8252 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8253 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8257 g_slist_free (class_inits);
8260 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8261 link_bblock (cfg, bblock, tblock);
8262 if (sp != stack_start) {
8263 handle_stack_args (cfg, stack_start, sp - stack_start);
8265 CHECK_UNVERIFIABLE (cfg);
8267 bblock->next_bb = tblock;
8270 for (i = 0; i < bblock->in_scount; ++i) {
8271 if (cfg->verbose_level > 3)
8272 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8273 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8276 g_slist_free (class_inits);
8281 if (skip_dead_blocks) {
8282 int ip_offset = ip - header->code;
8284 if (ip_offset == bb->end)
8288 int op_size = mono_opcode_size (ip, end);
8289 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8291 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8293 if (ip_offset + op_size == bb->end) {
8294 MONO_INST_NEW (cfg, ins, OP_NOP);
8295 MONO_ADD_INS (bblock, ins);
8296 start_new_bblock = 1;
8304 * Sequence points are points where the debugger can place a breakpoint.
8305 * Currently, we generate these automatically at points where the IL
8308 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8310 * Make methods interruptable at the beginning, and at the targets of
8311 * backward branches.
8312 * Also, do this at the start of every bblock in methods with clauses too,
8313 * to be able to handle instructions with inprecise control flow like
8315 * Backward branches are handled at the end of method-to-ir ().
8317 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8319 /* Avoid sequence points on empty IL like .volatile */
8320 // FIXME: Enable this
8321 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8322 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8323 if (sp != stack_start)
8324 ins->flags |= MONO_INST_NONEMPTY_STACK;
8325 MONO_ADD_INS (cfg->cbb, ins);
8328 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8331 bblock->real_offset = cfg->real_offset;
8333 if ((cfg->method == method) && cfg->coverage_info) {
8334 guint32 cil_offset = ip - header->code;
8335 cfg->coverage_info->data [cil_offset].cil_code = ip;
8337 /* TODO: Use an increment here */
8338 #if defined(TARGET_X86)
8339 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8340 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8342 MONO_ADD_INS (cfg->cbb, ins);
8344 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8345 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8349 if (cfg->verbose_level > 3)
8350 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8354 if (seq_points && !sym_seq_points && sp != stack_start) {
8356 * The C# compiler uses these nops to notify the JIT that it should
8357 * insert seq points.
8359 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8360 MONO_ADD_INS (cfg->cbb, ins);
8362 if (cfg->keep_cil_nops)
8363 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8365 MONO_INST_NEW (cfg, ins, OP_NOP);
8367 MONO_ADD_INS (bblock, ins);
8370 if (should_insert_brekpoint (cfg->method)) {
8371 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8373 MONO_INST_NEW (cfg, ins, OP_NOP);
8376 MONO_ADD_INS (bblock, ins);
8382 CHECK_STACK_OVF (1);
8383 n = (*ip)-CEE_LDARG_0;
8385 EMIT_NEW_ARGLOAD (cfg, ins, n);
8393 CHECK_STACK_OVF (1);
8394 n = (*ip)-CEE_LDLOC_0;
8396 EMIT_NEW_LOCLOAD (cfg, ins, n);
8405 n = (*ip)-CEE_STLOC_0;
8408 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8410 emit_stloc_ir (cfg, sp, header, n);
8417 CHECK_STACK_OVF (1);
8420 EMIT_NEW_ARGLOAD (cfg, ins, n);
8426 CHECK_STACK_OVF (1);
8429 NEW_ARGLOADA (cfg, ins, n);
8430 MONO_ADD_INS (cfg->cbb, ins);
8440 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8442 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8447 CHECK_STACK_OVF (1);
8450 EMIT_NEW_LOCLOAD (cfg, ins, n);
8454 case CEE_LDLOCA_S: {
8455 unsigned char *tmp_ip;
8457 CHECK_STACK_OVF (1);
8458 CHECK_LOCAL (ip [1]);
8460 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8466 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8475 CHECK_LOCAL (ip [1]);
8476 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8478 emit_stloc_ir (cfg, sp, header, ip [1]);
8483 CHECK_STACK_OVF (1);
8484 EMIT_NEW_PCONST (cfg, ins, NULL);
8485 ins->type = STACK_OBJ;
8490 CHECK_STACK_OVF (1);
8491 EMIT_NEW_ICONST (cfg, ins, -1);
8504 CHECK_STACK_OVF (1);
8505 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8511 CHECK_STACK_OVF (1);
8513 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8519 CHECK_STACK_OVF (1);
8520 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8526 CHECK_STACK_OVF (1);
8527 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8528 ins->type = STACK_I8;
8529 ins->dreg = alloc_dreg (cfg, STACK_I8);
8531 ins->inst_l = (gint64)read64 (ip);
8532 MONO_ADD_INS (bblock, ins);
8538 gboolean use_aotconst = FALSE;
8540 #ifdef TARGET_POWERPC
8541 /* FIXME: Clean this up */
8542 if (cfg->compile_aot)
8543 use_aotconst = TRUE;
8546 /* FIXME: we should really allocate this only late in the compilation process */
8547 f = mono_domain_alloc (cfg->domain, sizeof (float));
8549 CHECK_STACK_OVF (1);
8555 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8557 dreg = alloc_freg (cfg);
8558 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8559 ins->type = cfg->r4_stack_type;
8561 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8562 ins->type = cfg->r4_stack_type;
8563 ins->dreg = alloc_dreg (cfg, STACK_R8);
8565 MONO_ADD_INS (bblock, ins);
8575 gboolean use_aotconst = FALSE;
8577 #ifdef TARGET_POWERPC
8578 /* FIXME: Clean this up */
8579 if (cfg->compile_aot)
8580 use_aotconst = TRUE;
8583 /* FIXME: we should really allocate this only late in the compilation process */
8584 d = mono_domain_alloc (cfg->domain, sizeof (double));
8586 CHECK_STACK_OVF (1);
8592 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8594 dreg = alloc_freg (cfg);
8595 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8596 ins->type = STACK_R8;
8598 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8599 ins->type = STACK_R8;
8600 ins->dreg = alloc_dreg (cfg, STACK_R8);
8602 MONO_ADD_INS (bblock, ins);
8611 MonoInst *temp, *store;
8613 CHECK_STACK_OVF (1);
8617 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8618 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8620 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8623 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8636 if (sp [0]->type == STACK_R8)
8637 /* we need to pop the value from the x86 FP stack */
8638 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8644 INLINE_FAILURE ("jmp");
8645 GSHAREDVT_FAILURE (*ip);
8648 if (stack_start != sp)
8650 token = read32 (ip + 1);
8651 /* FIXME: check the signature matches */
8652 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8654 if (!cmethod || mono_loader_get_last_error ())
8657 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8658 GENERIC_SHARING_FAILURE (CEE_JMP);
8660 if (mono_security_cas_enabled ())
8661 CHECK_CFG_EXCEPTION;
8663 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8665 if (ARCH_HAVE_OP_TAIL_CALL) {
8666 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8669 /* Handle tail calls similarly to calls */
8670 n = fsig->param_count + fsig->hasthis;
8674 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8675 call->method = cmethod;
8676 call->tail_call = TRUE;
8677 call->signature = mono_method_signature (cmethod);
8678 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8679 call->inst.inst_p0 = cmethod;
8680 for (i = 0; i < n; ++i)
8681 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8683 mono_arch_emit_call (cfg, call);
8684 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8685 MONO_ADD_INS (bblock, (MonoInst*)call);
8687 for (i = 0; i < num_args; ++i)
8688 /* Prevent arguments from being optimized away */
8689 arg_array [i]->flags |= MONO_INST_VOLATILE;
8691 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8692 ins = (MonoInst*)call;
8693 ins->inst_p0 = cmethod;
8694 MONO_ADD_INS (bblock, ins);
8698 start_new_bblock = 1;
8703 case CEE_CALLVIRT: {
8704 MonoInst *addr = NULL;
8705 MonoMethodSignature *fsig = NULL;
8707 int virtual = *ip == CEE_CALLVIRT;
8708 int calli = *ip == CEE_CALLI;
8709 gboolean pass_imt_from_rgctx = FALSE;
8710 MonoInst *imt_arg = NULL;
8711 MonoInst *keep_this_alive = NULL;
8712 gboolean pass_vtable = FALSE;
8713 gboolean pass_mrgctx = FALSE;
8714 MonoInst *vtable_arg = NULL;
8715 gboolean check_this = FALSE;
8716 gboolean supported_tail_call = FALSE;
8717 gboolean tail_call = FALSE;
8718 gboolean need_seq_point = FALSE;
8719 guint32 call_opcode = *ip;
8720 gboolean emit_widen = TRUE;
8721 gboolean push_res = TRUE;
8722 gboolean skip_ret = FALSE;
8723 gboolean delegate_invoke = FALSE;
8726 token = read32 (ip + 1);
8731 //GSHAREDVT_FAILURE (*ip);
8736 fsig = mini_get_signature (method, token, generic_context);
8737 n = fsig->param_count + fsig->hasthis;
8739 if (method->dynamic && fsig->pinvoke) {
8743 * This is a call through a function pointer using a pinvoke
8744 * signature. Have to create a wrapper and call that instead.
8745 * FIXME: This is very slow, need to create a wrapper at JIT time
8746 * instead based on the signature.
8748 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8749 EMIT_NEW_PCONST (cfg, args [1], fsig);
8751 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8754 MonoMethod *cil_method;
8756 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8757 cil_method = cmethod;
8759 if (constrained_class) {
8760 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8761 if (cfg->verbose_level > 2)
8762 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8763 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8764 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8765 cfg->generic_sharing_context)) {
8766 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8770 if (cfg->verbose_level > 2)
8771 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8773 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8775 * This is needed since get_method_constrained can't find
8776 * the method in klass representing a type var.
8777 * The type var is guaranteed to be a reference type in this
8780 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8781 g_assert (!cmethod->klass->valuetype);
8783 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8789 if (!cmethod || mono_loader_get_last_error ())
8791 if (!dont_verify && !cfg->skip_visibility) {
8792 MonoMethod *target_method = cil_method;
8793 if (method->is_inflated) {
8794 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8796 if (!mono_method_can_access_method (method_definition, target_method) &&
8797 !mono_method_can_access_method (method, cil_method))
8798 METHOD_ACCESS_FAILURE (method, cil_method);
8801 if (mono_security_core_clr_enabled ())
8802 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8804 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8805 /* MS.NET seems to silently convert this to a callvirt */
8810 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8811 * converts to a callvirt.
8813 * tests/bug-515884.il is an example of this behavior
8815 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8816 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8817 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8821 if (!cmethod->klass->inited)
8822 if (!mono_class_init (cmethod->klass))
8823 TYPE_LOAD_ERROR (cmethod->klass);
8825 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8826 mini_class_is_system_array (cmethod->klass)) {
8827 array_rank = cmethod->klass->rank;
8828 fsig = mono_method_signature (cmethod);
8830 fsig = mono_method_signature (cmethod);
8835 if (fsig->pinvoke) {
8836 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8837 check_for_pending_exc, cfg->compile_aot);
8838 fsig = mono_method_signature (wrapper);
8839 } else if (constrained_class) {
8840 fsig = mono_method_signature (cmethod);
8842 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8847 mono_save_token_info (cfg, image, token, cil_method);
8849 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8850 need_seq_point = TRUE;
8852 n = fsig->param_count + fsig->hasthis;
8854 /* Don't support calls made using type arguments for now */
8856 if (cfg->gsharedvt) {
8857 if (mini_is_gsharedvt_signature (cfg, fsig))
8858 GSHAREDVT_FAILURE (*ip);
8862 if (mono_security_cas_enabled ()) {
8863 if (check_linkdemand (cfg, method, cmethod))
8864 INLINE_FAILURE ("linkdemand");
8865 CHECK_CFG_EXCEPTION;
8868 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8869 g_assert_not_reached ();
8872 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8875 if (!cfg->generic_sharing_context && cmethod)
8876 g_assert (!mono_method_check_context_used (cmethod));
8880 //g_assert (!virtual || fsig->hasthis);
8884 if (constrained_class) {
8885 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8886 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8887 /* The 'Own method' case below */
8888 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8889 /* 'The type parameter is instantiated as a reference type' case below. */
8891 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8892 CHECK_CFG_EXCEPTION;
8899 * We have the `constrained.' prefix opcode.
8901 if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8903 * The type parameter is instantiated as a valuetype,
8904 * but that type doesn't override the method we're
8905 * calling, so we need to box `this'.
8907 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8908 ins->klass = constrained_class;
8909 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8910 CHECK_CFG_EXCEPTION;
8911 } else if (!constrained_class->valuetype) {
8912 int dreg = alloc_ireg_ref (cfg);
8915 * The type parameter is instantiated as a reference
8916 * type. We have a managed pointer on the stack, so
8917 * we need to dereference it here.
8919 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8920 ins->type = STACK_OBJ;
8923 if (cmethod->klass->valuetype) {
8926 /* Interface method */
8929 mono_class_setup_vtable (constrained_class);
8930 CHECK_TYPELOAD (constrained_class);
8931 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8933 TYPE_LOAD_ERROR (constrained_class);
8934 slot = mono_method_get_vtable_slot (cmethod);
8936 TYPE_LOAD_ERROR (cmethod->klass);
8937 cmethod = constrained_class->vtable [ioffset + slot];
8939 if (cmethod->klass == mono_defaults.enum_class) {
8940 /* Enum implements some interfaces, so treat this as the first case */
8941 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8942 ins->klass = constrained_class;
8943 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8944 CHECK_CFG_EXCEPTION;
8949 constrained_class = NULL;
8952 if (!calli && check_call_signature (cfg, fsig, sp))
8955 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8956 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8957 delegate_invoke = TRUE;
8960 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8962 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8963 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8971 * If the callee is a shared method, then its static cctor
8972 * might not get called after the call was patched.
8974 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8975 emit_generic_class_init (cfg, cmethod->klass);
8976 CHECK_TYPELOAD (cmethod->klass);
8980 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8982 if (cfg->generic_sharing_context && cmethod) {
8983 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8985 context_used = mini_method_check_context_used (cfg, cmethod);
8987 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8988 /* Generic method interface
8989 calls are resolved via a
8990 helper function and don't
8992 if (!cmethod_context || !cmethod_context->method_inst)
8993 pass_imt_from_rgctx = TRUE;
8997 * If a shared method calls another
8998 * shared method then the caller must
8999 * have a generic sharing context
9000 * because the magic trampoline
9001 * requires it. FIXME: We shouldn't
9002 * have to force the vtable/mrgctx
9003 * variable here. Instead there
9004 * should be a flag in the cfg to
9005 * request a generic sharing context.
9008 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9009 mono_get_vtable_var (cfg);
9014 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9016 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9018 CHECK_TYPELOAD (cmethod->klass);
9019 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9024 g_assert (!vtable_arg);
9026 if (!cfg->compile_aot) {
9028 * emit_get_rgctx_method () calls mono_class_vtable () so check
9029 * for type load errors before.
9031 mono_class_setup_vtable (cmethod->klass);
9032 CHECK_TYPELOAD (cmethod->klass);
9035 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9037 /* !marshalbyref is needed to properly handle generic methods + remoting */
9038 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9039 MONO_METHOD_IS_FINAL (cmethod)) &&
9040 !mono_class_is_marshalbyref (cmethod->klass)) {
9047 if (pass_imt_from_rgctx) {
9048 g_assert (!pass_vtable);
9051 imt_arg = emit_get_rgctx_method (cfg, context_used,
9052 cmethod, MONO_RGCTX_INFO_METHOD);
9056 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9058 /* Calling virtual generic methods */
9059 if (cmethod && virtual &&
9060 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9061 !(MONO_METHOD_IS_FINAL (cmethod) &&
9062 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9063 fsig->generic_param_count &&
9064 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9065 MonoInst *this_temp, *this_arg_temp, *store;
9066 MonoInst *iargs [4];
9067 gboolean use_imt = FALSE;
9069 g_assert (fsig->is_inflated);
9071 /* Prevent inlining of methods that contain indirect calls */
9072 INLINE_FAILURE ("virtual generic call");
9074 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9075 GSHAREDVT_FAILURE (*ip);
9077 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9078 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9083 g_assert (!imt_arg);
9085 g_assert (cmethod->is_inflated);
9086 imt_arg = emit_get_rgctx_method (cfg, context_used,
9087 cmethod, MONO_RGCTX_INFO_METHOD);
9088 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9090 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9091 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9092 MONO_ADD_INS (bblock, store);
9094 /* FIXME: This should be a managed pointer */
9095 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9097 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9098 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9099 cmethod, MONO_RGCTX_INFO_METHOD);
9100 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9101 addr = mono_emit_jit_icall (cfg,
9102 mono_helper_compile_generic_method, iargs);
9104 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9106 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9113 * Implement a workaround for the inherent races involved in locking:
9119 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9120 * try block, the Exit () won't be executed, see:
9121 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9122 * To work around this, we extend such try blocks to include the last x bytes
9123 * of the Monitor.Enter () call.
9125 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9126 MonoBasicBlock *tbb;
9128 GET_BBLOCK (cfg, tbb, ip + 5);
9130 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9131 * from Monitor.Enter like ArgumentNullException.
9133 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9134 /* Mark this bblock as needing to be extended */
9135 tbb->extend_try_block = TRUE;
9139 /* Conversion to a JIT intrinsic */
9140 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9142 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9143 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9150 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
9151 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9152 mono_method_check_inlining (cfg, cmethod)) {
9154 gboolean always = FALSE;
9156 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9157 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9158 /* Prevent inlining of methods that call wrappers */
9159 INLINE_FAILURE ("wrapper call");
9160 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9164 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9166 cfg->real_offset += 5;
9168 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9169 /* *sp is already set by inline_method */
9174 inline_costs += costs;
9180 /* Tail recursion elimination */
9181 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9182 gboolean has_vtargs = FALSE;
9185 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9186 INLINE_FAILURE ("tail call");
9188 /* keep it simple */
9189 for (i = fsig->param_count - 1; i >= 0; i--) {
9190 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9195 for (i = 0; i < n; ++i)
9196 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9197 MONO_INST_NEW (cfg, ins, OP_BR);
9198 MONO_ADD_INS (bblock, ins);
9199 tblock = start_bblock->out_bb [0];
9200 link_bblock (cfg, bblock, tblock);
9201 ins->inst_target_bb = tblock;
9202 start_new_bblock = 1;
9204 /* skip the CEE_RET, too */
9205 if (ip_in_bb (cfg, bblock, ip + 5))
9212 inline_costs += 10 * num_calls++;
9215 * Making generic calls out of gsharedvt methods.
9216 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9217 * patching gshared method addresses into a gsharedvt method.
9219 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9220 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9221 MonoRgctxInfoType info_type;
9224 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9225 //GSHAREDVT_FAILURE (*ip);
9226 // disable for possible remoting calls
9227 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9228 GSHAREDVT_FAILURE (*ip);
9229 if (fsig->generic_param_count) {
9230 /* virtual generic call */
9231 g_assert (mono_use_imt);
9232 g_assert (!imt_arg);
9233 /* Same as the virtual generic case above */
9234 imt_arg = emit_get_rgctx_method (cfg, context_used,
9235 cmethod, MONO_RGCTX_INFO_METHOD);
9236 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9238 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9239 /* This can happen when we call a fully instantiated iface method */
9240 imt_arg = emit_get_rgctx_method (cfg, context_used,
9241 cmethod, MONO_RGCTX_INFO_METHOD);
9246 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9247 keep_this_alive = sp [0];
9249 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9250 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9252 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9253 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9255 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9257 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9259 * We pass the address to the gsharedvt trampoline in the rgctx reg
9261 MonoInst *callee = addr;
9263 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9265 GSHAREDVT_FAILURE (*ip);
9267 addr = emit_get_rgctx_sig (cfg, context_used,
9268 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9269 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9273 /* Generic sharing */
9276 * Use this if the callee is gsharedvt sharable too, since
9277 * at runtime we might find an instantiation so the call cannot
9278 * be patched (the 'no_patch' code path in mini-trampolines.c).
9280 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9281 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9282 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9283 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9284 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9285 INLINE_FAILURE ("gshared");
9287 g_assert (cfg->generic_sharing_context && cmethod);
9291 * We are compiling a call to a
9292 * generic method from shared code,
9293 * which means that we have to look up
9294 * the method in the rgctx and do an
9298 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9300 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9301 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9305 /* Indirect calls */
9307 if (call_opcode == CEE_CALL)
9308 g_assert (context_used);
9309 else if (call_opcode == CEE_CALLI)
9310 g_assert (!vtable_arg);
9312 /* FIXME: what the hell is this??? */
9313 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9314 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9316 /* Prevent inlining of methods with indirect calls */
9317 INLINE_FAILURE ("indirect call");
9319 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9324 * Instead of emitting an indirect call, emit a direct call
9325 * with the contents of the aotconst as the patch info.
9327 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9328 info_type = addr->inst_c1;
9329 info_data = addr->inst_p0;
9331 info_type = addr->inst_right->inst_c1;
9332 info_data = addr->inst_right->inst_left;
9335 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9336 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9341 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9349 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9350 MonoInst *val = sp [fsig->param_count];
9352 if (val->type == STACK_OBJ) {
9353 MonoInst *iargs [2];
9358 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9361 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9362 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9363 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9364 emit_write_barrier (cfg, addr, val);
9365 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9366 GSHAREDVT_FAILURE (*ip);
9367 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9368 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9371 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9372 if (!cmethod->klass->element_class->valuetype && !readonly)
9373 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9374 CHECK_TYPELOAD (cmethod->klass);
9377 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9380 g_assert_not_reached ();
9387 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9391 /* Tail prefix / tail call optimization */
9393 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9394 /* FIXME: runtime generic context pointer for jumps? */
9395 /* FIXME: handle this for generic sharing eventually */
9396 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9397 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9398 supported_tail_call = TRUE;
9400 if (supported_tail_call) {
9403 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9404 INLINE_FAILURE ("tail call");
9406 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9408 if (ARCH_HAVE_OP_TAIL_CALL) {
9409 /* Handle tail calls similarly to normal calls */
9412 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9414 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9415 call->tail_call = TRUE;
9416 call->method = cmethod;
9417 call->signature = mono_method_signature (cmethod);
9420 * We implement tail calls by storing the actual arguments into the
9421 * argument variables, then emitting a CEE_JMP.
9423 for (i = 0; i < n; ++i) {
9424 /* Prevent argument from being register allocated */
9425 arg_array [i]->flags |= MONO_INST_VOLATILE;
9426 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9428 ins = (MonoInst*)call;
9429 ins->inst_p0 = cmethod;
9430 ins->inst_p1 = arg_array [0];
9431 MONO_ADD_INS (bblock, ins);
9432 link_bblock (cfg, bblock, end_bblock);
9433 start_new_bblock = 1;
9435 // FIXME: Eliminate unreachable epilogs
9438 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9439 * only reachable from this call.
9441 GET_BBLOCK (cfg, tblock, ip + 5);
9442 if (tblock == bblock || tblock->in_count == 0)
9451 * Synchronized wrappers.
9452 * Its hard to determine where to replace a method with its synchronized
9453 * wrapper without causing an infinite recursion. The current solution is
9454 * to add the synchronized wrapper in the trampolines, and to
9455 * change the called method to a dummy wrapper, and resolve that wrapper
9456 * to the real method in mono_jit_compile_method ().
9458 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9459 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9460 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9461 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9465 INLINE_FAILURE ("call");
9466 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9467 imt_arg, vtable_arg);
9470 link_bblock (cfg, bblock, end_bblock);
9471 start_new_bblock = 1;
9473 // FIXME: Eliminate unreachable epilogs
9476 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9477 * only reachable from this call.
9479 GET_BBLOCK (cfg, tblock, ip + 5);
9480 if (tblock == bblock || tblock->in_count == 0)
9487 /* End of call, INS should contain the result of the call, if any */
9489 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9492 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9497 if (keep_this_alive) {
9498 MonoInst *dummy_use;
9500 /* See mono_emit_method_call_full () */
9501 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9504 CHECK_CFG_EXCEPTION;
9508 g_assert (*ip == CEE_RET);
9512 constrained_class = NULL;
9514 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9518 if (cfg->method != method) {
9519 /* return from inlined method */
9521 * If in_count == 0, that means the ret is unreachable due to
9522 * being preceeded by a throw. In that case, inline_method () will
9523 * handle setting the return value
9524 * (test case: test_0_inline_throw ()).
9526 if (return_var && cfg->cbb->in_count) {
9527 MonoType *ret_type = mono_method_signature (method)->ret;
9533 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9536 //g_assert (returnvar != -1);
9537 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9538 cfg->ret_var_set = TRUE;
9541 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9543 if (cfg->lmf_var && cfg->cbb->in_count)
9547 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9549 if (seq_points && !sym_seq_points) {
9551 * Place a seq point here too even through the IL stack is not
9552 * empty, so a step over on
9555 * will work correctly.
9557 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9558 MONO_ADD_INS (cfg->cbb, ins);
9561 g_assert (!return_var);
9565 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9568 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9571 if (!cfg->vret_addr) {
9574 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9576 EMIT_NEW_RETLOADA (cfg, ret_addr);
9578 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9579 ins->klass = mono_class_from_mono_type (ret_type);
9582 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9583 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9584 MonoInst *iargs [1];
9588 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9589 mono_arch_emit_setret (cfg, method, conv);
9591 mono_arch_emit_setret (cfg, method, *sp);
9594 mono_arch_emit_setret (cfg, method, *sp);
9599 if (sp != stack_start)
9601 MONO_INST_NEW (cfg, ins, OP_BR);
9603 ins->inst_target_bb = end_bblock;
9604 MONO_ADD_INS (bblock, ins);
9605 link_bblock (cfg, bblock, end_bblock);
9606 start_new_bblock = 1;
9610 MONO_INST_NEW (cfg, ins, OP_BR);
9612 target = ip + 1 + (signed char)(*ip);
9614 GET_BBLOCK (cfg, tblock, target);
9615 link_bblock (cfg, bblock, tblock);
9616 ins->inst_target_bb = tblock;
9617 if (sp != stack_start) {
9618 handle_stack_args (cfg, stack_start, sp - stack_start);
9620 CHECK_UNVERIFIABLE (cfg);
9622 MONO_ADD_INS (bblock, ins);
9623 start_new_bblock = 1;
9624 inline_costs += BRANCH_COST;
9638 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9640 target = ip + 1 + *(signed char*)ip;
9646 inline_costs += BRANCH_COST;
9650 MONO_INST_NEW (cfg, ins, OP_BR);
9653 target = ip + 4 + (gint32)read32(ip);
9655 GET_BBLOCK (cfg, tblock, target);
9656 link_bblock (cfg, bblock, tblock);
9657 ins->inst_target_bb = tblock;
9658 if (sp != stack_start) {
9659 handle_stack_args (cfg, stack_start, sp - stack_start);
9661 CHECK_UNVERIFIABLE (cfg);
9664 MONO_ADD_INS (bblock, ins);
9666 start_new_bblock = 1;
9667 inline_costs += BRANCH_COST;
9674 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9675 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9676 guint32 opsize = is_short ? 1 : 4;
9678 CHECK_OPSIZE (opsize);
9680 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9683 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9688 GET_BBLOCK (cfg, tblock, target);
9689 link_bblock (cfg, bblock, tblock);
9690 GET_BBLOCK (cfg, tblock, ip);
9691 link_bblock (cfg, bblock, tblock);
9693 if (sp != stack_start) {
9694 handle_stack_args (cfg, stack_start, sp - stack_start);
9695 CHECK_UNVERIFIABLE (cfg);
9698 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9699 cmp->sreg1 = sp [0]->dreg;
9700 type_from_op (cfg, cmp, sp [0], NULL);
9703 #if SIZEOF_REGISTER == 4
9704 if (cmp->opcode == OP_LCOMPARE_IMM) {
9705 /* Convert it to OP_LCOMPARE */
9706 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9707 ins->type = STACK_I8;
9708 ins->dreg = alloc_dreg (cfg, STACK_I8);
9710 MONO_ADD_INS (bblock, ins);
9711 cmp->opcode = OP_LCOMPARE;
9712 cmp->sreg2 = ins->dreg;
9715 MONO_ADD_INS (bblock, cmp);
9717 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9718 type_from_op (cfg, ins, sp [0], NULL);
9719 MONO_ADD_INS (bblock, ins);
9720 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9721 GET_BBLOCK (cfg, tblock, target);
9722 ins->inst_true_bb = tblock;
9723 GET_BBLOCK (cfg, tblock, ip);
9724 ins->inst_false_bb = tblock;
9725 start_new_bblock = 2;
9728 inline_costs += BRANCH_COST;
9743 MONO_INST_NEW (cfg, ins, *ip);
9745 target = ip + 4 + (gint32)read32(ip);
9751 inline_costs += BRANCH_COST;
9755 MonoBasicBlock **targets;
9756 MonoBasicBlock *default_bblock;
9757 MonoJumpInfoBBTable *table;
9758 int offset_reg = alloc_preg (cfg);
9759 int target_reg = alloc_preg (cfg);
9760 int table_reg = alloc_preg (cfg);
9761 int sum_reg = alloc_preg (cfg);
9762 gboolean use_op_switch;
9766 n = read32 (ip + 1);
9769 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9773 CHECK_OPSIZE (n * sizeof (guint32));
9774 target = ip + n * sizeof (guint32);
9776 GET_BBLOCK (cfg, default_bblock, target);
9777 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9779 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9780 for (i = 0; i < n; ++i) {
9781 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9782 targets [i] = tblock;
9783 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9787 if (sp != stack_start) {
9789 * Link the current bb with the targets as well, so handle_stack_args
9790 * will set their in_stack correctly.
9792 link_bblock (cfg, bblock, default_bblock);
9793 for (i = 0; i < n; ++i)
9794 link_bblock (cfg, bblock, targets [i]);
9796 handle_stack_args (cfg, stack_start, sp - stack_start);
9798 CHECK_UNVERIFIABLE (cfg);
9801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9802 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9805 for (i = 0; i < n; ++i)
9806 link_bblock (cfg, bblock, targets [i]);
9808 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9809 table->table = targets;
9810 table->table_size = n;
9812 use_op_switch = FALSE;
9814 /* ARM implements SWITCH statements differently */
9815 /* FIXME: Make it use the generic implementation */
9816 if (!cfg->compile_aot)
9817 use_op_switch = TRUE;
9820 if (COMPILE_LLVM (cfg))
9821 use_op_switch = TRUE;
9823 cfg->cbb->has_jump_table = 1;
9825 if (use_op_switch) {
9826 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9827 ins->sreg1 = src1->dreg;
9828 ins->inst_p0 = table;
9829 ins->inst_many_bb = targets;
9830 ins->klass = GUINT_TO_POINTER (n);
9831 MONO_ADD_INS (cfg->cbb, ins);
9833 if (sizeof (gpointer) == 8)
9834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9838 #if SIZEOF_REGISTER == 8
9839 /* The upper word might not be zero, and we add it to a 64 bit address later */
9840 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9843 if (cfg->compile_aot) {
9844 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9846 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9847 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9848 ins->inst_p0 = table;
9849 ins->dreg = table_reg;
9850 MONO_ADD_INS (cfg->cbb, ins);
9853 /* FIXME: Use load_memindex */
9854 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9856 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9858 start_new_bblock = 1;
9859 inline_costs += (BRANCH_COST * 2);
9879 dreg = alloc_freg (cfg);
9882 dreg = alloc_lreg (cfg);
9885 dreg = alloc_ireg_ref (cfg);
9888 dreg = alloc_preg (cfg);
9891 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9892 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9893 if (*ip == CEE_LDIND_R4)
9894 ins->type = cfg->r4_stack_type;
9895 ins->flags |= ins_flag;
9896 MONO_ADD_INS (bblock, ins);
9898 if (ins_flag & MONO_INST_VOLATILE) {
9899 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9900 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9916 if (ins_flag & MONO_INST_VOLATILE) {
9917 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9918 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9921 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9922 ins->flags |= ins_flag;
9925 MONO_ADD_INS (bblock, ins);
9927 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9928 emit_write_barrier (cfg, sp [0], sp [1]);
9937 MONO_INST_NEW (cfg, ins, (*ip));
9939 ins->sreg1 = sp [0]->dreg;
9940 ins->sreg2 = sp [1]->dreg;
9941 type_from_op (cfg, ins, sp [0], sp [1]);
9943 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9945 /* Use the immediate opcodes if possible */
9946 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9947 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9948 if (imm_opcode != -1) {
9949 ins->opcode = imm_opcode;
9950 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9953 NULLIFY_INS (sp [1]);
9957 MONO_ADD_INS ((cfg)->cbb, (ins));
9959 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9976 MONO_INST_NEW (cfg, ins, (*ip));
9978 ins->sreg1 = sp [0]->dreg;
9979 ins->sreg2 = sp [1]->dreg;
9980 type_from_op (cfg, ins, sp [0], sp [1]);
9982 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9983 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9985 /* FIXME: Pass opcode to is_inst_imm */
9987 /* Use the immediate opcodes if possible */
9988 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9991 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9992 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9993 /* Keep emulated opcodes which are optimized away later */
9994 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9995 imm_opcode = mono_op_to_op_imm (ins->opcode);
9998 if (imm_opcode != -1) {
9999 ins->opcode = imm_opcode;
10000 if (sp [1]->opcode == OP_I8CONST) {
10001 #if SIZEOF_REGISTER == 8
10002 ins->inst_imm = sp [1]->inst_l;
10004 ins->inst_ls_word = sp [1]->inst_ls_word;
10005 ins->inst_ms_word = sp [1]->inst_ms_word;
10009 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10012 /* Might be followed by an instruction added by add_widen_op */
10013 if (sp [1]->next == NULL)
10014 NULLIFY_INS (sp [1]);
10017 MONO_ADD_INS ((cfg)->cbb, (ins));
10019 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10032 case CEE_CONV_OVF_I8:
10033 case CEE_CONV_OVF_U8:
10034 case CEE_CONV_R_UN:
10037 /* Special case this earlier so we have long constants in the IR */
10038 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10039 int data = sp [-1]->inst_c0;
10040 sp [-1]->opcode = OP_I8CONST;
10041 sp [-1]->type = STACK_I8;
10042 #if SIZEOF_REGISTER == 8
10043 if ((*ip) == CEE_CONV_U8)
10044 sp [-1]->inst_c0 = (guint32)data;
10046 sp [-1]->inst_c0 = data;
10048 sp [-1]->inst_ls_word = data;
10049 if ((*ip) == CEE_CONV_U8)
10050 sp [-1]->inst_ms_word = 0;
10052 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10054 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10061 case CEE_CONV_OVF_I4:
10062 case CEE_CONV_OVF_I1:
10063 case CEE_CONV_OVF_I2:
10064 case CEE_CONV_OVF_I:
10065 case CEE_CONV_OVF_U:
10068 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10069 ADD_UNOP (CEE_CONV_OVF_I8);
10076 case CEE_CONV_OVF_U1:
10077 case CEE_CONV_OVF_U2:
10078 case CEE_CONV_OVF_U4:
10081 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10082 ADD_UNOP (CEE_CONV_OVF_U8);
10089 case CEE_CONV_OVF_I1_UN:
10090 case CEE_CONV_OVF_I2_UN:
10091 case CEE_CONV_OVF_I4_UN:
10092 case CEE_CONV_OVF_I8_UN:
10093 case CEE_CONV_OVF_U1_UN:
10094 case CEE_CONV_OVF_U2_UN:
10095 case CEE_CONV_OVF_U4_UN:
10096 case CEE_CONV_OVF_U8_UN:
10097 case CEE_CONV_OVF_I_UN:
10098 case CEE_CONV_OVF_U_UN:
10105 CHECK_CFG_EXCEPTION;
10109 case CEE_ADD_OVF_UN:
10111 case CEE_MUL_OVF_UN:
10113 case CEE_SUB_OVF_UN:
10119 GSHAREDVT_FAILURE (*ip);
10122 token = read32 (ip + 1);
10123 klass = mini_get_class (method, token, generic_context);
10124 CHECK_TYPELOAD (klass);
10126 if (generic_class_is_reference_type (cfg, klass)) {
10127 MonoInst *store, *load;
10128 int dreg = alloc_ireg_ref (cfg);
10130 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10131 load->flags |= ins_flag;
10132 MONO_ADD_INS (cfg->cbb, load);
10134 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10135 store->flags |= ins_flag;
10136 MONO_ADD_INS (cfg->cbb, store);
10138 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10139 emit_write_barrier (cfg, sp [0], sp [1]);
10141 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10147 int loc_index = -1;
10153 token = read32 (ip + 1);
10154 klass = mini_get_class (method, token, generic_context);
10155 CHECK_TYPELOAD (klass);
10157 /* Optimize the common ldobj+stloc combination */
10160 loc_index = ip [6];
10167 loc_index = ip [5] - CEE_STLOC_0;
10174 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10175 CHECK_LOCAL (loc_index);
10177 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10178 ins->dreg = cfg->locals [loc_index]->dreg;
10179 ins->flags |= ins_flag;
10182 if (ins_flag & MONO_INST_VOLATILE) {
10183 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10184 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10190 /* Optimize the ldobj+stobj combination */
10191 /* The reference case ends up being a load+store anyway */
10192 /* Skip this if the operation is volatile. */
10193 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10198 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10205 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10206 ins->flags |= ins_flag;
10209 if (ins_flag & MONO_INST_VOLATILE) {
10210 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10211 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10220 CHECK_STACK_OVF (1);
10222 n = read32 (ip + 1);
10224 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10225 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10226 ins->type = STACK_OBJ;
10229 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10230 MonoInst *iargs [1];
10231 char *str = mono_method_get_wrapper_data (method, n);
10233 if (cfg->compile_aot)
10234 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10236 EMIT_NEW_PCONST (cfg, iargs [0], str);
10237 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10239 if (cfg->opt & MONO_OPT_SHARED) {
10240 MonoInst *iargs [3];
10242 if (cfg->compile_aot) {
10243 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10245 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10246 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10247 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10248 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10249 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10251 if (bblock->out_of_line) {
10252 MonoInst *iargs [2];
10254 if (image == mono_defaults.corlib) {
10256 * Avoid relocations in AOT and save some space by using a
10257 * version of helper_ldstr specialized to mscorlib.
10259 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10260 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10262 /* Avoid creating the string object */
10263 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10264 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10265 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10269 if (cfg->compile_aot) {
10270 NEW_LDSTRCONST (cfg, ins, image, n);
10272 MONO_ADD_INS (bblock, ins);
10275 NEW_PCONST (cfg, ins, NULL);
10276 ins->type = STACK_OBJ;
10277 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10279 OUT_OF_MEMORY_FAILURE;
10282 MONO_ADD_INS (bblock, ins);
10291 MonoInst *iargs [2];
10292 MonoMethodSignature *fsig;
10295 MonoInst *vtable_arg = NULL;
10298 token = read32 (ip + 1);
10299 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10300 if (!cmethod || mono_loader_get_last_error ())
10302 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10305 mono_save_token_info (cfg, image, token, cmethod);
10307 if (!mono_class_init (cmethod->klass))
10308 TYPE_LOAD_ERROR (cmethod->klass);
10310 context_used = mini_method_check_context_used (cfg, cmethod);
10312 if (mono_security_cas_enabled ()) {
10313 if (check_linkdemand (cfg, method, cmethod))
10314 INLINE_FAILURE ("linkdemand");
10315 CHECK_CFG_EXCEPTION;
10316 } else if (mono_security_core_clr_enabled ()) {
10317 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10320 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10321 emit_generic_class_init (cfg, cmethod->klass);
10322 CHECK_TYPELOAD (cmethod->klass);
10326 if (cfg->gsharedvt) {
10327 if (mini_is_gsharedvt_variable_signature (sig))
10328 GSHAREDVT_FAILURE (*ip);
10332 n = fsig->param_count;
10336 * Generate smaller code for the common newobj <exception> instruction in
10337 * argument checking code.
10339 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10340 is_exception_class (cmethod->klass) && n <= 2 &&
10341 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10342 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10343 MonoInst *iargs [3];
10347 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10350 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10353 iargs [1] = sp [0];
10354 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10357 iargs [1] = sp [0];
10358 iargs [2] = sp [1];
10359 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10362 g_assert_not_reached ();
10370 /* move the args to allow room for 'this' in the first position */
10376 /* check_call_signature () requires sp[0] to be set */
10377 this_ins.type = STACK_OBJ;
10378 sp [0] = &this_ins;
10379 if (check_call_signature (cfg, fsig, sp))
10384 if (mini_class_is_system_array (cmethod->klass)) {
10385 *sp = emit_get_rgctx_method (cfg, context_used,
10386 cmethod, MONO_RGCTX_INFO_METHOD);
10388 /* Avoid varargs in the common case */
10389 if (fsig->param_count == 1)
10390 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10391 else if (fsig->param_count == 2)
10392 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10393 else if (fsig->param_count == 3)
10394 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10395 else if (fsig->param_count == 4)
10396 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10398 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10399 } else if (cmethod->string_ctor) {
10400 g_assert (!context_used);
10401 g_assert (!vtable_arg);
10402 /* we simply pass a null pointer */
10403 EMIT_NEW_PCONST (cfg, *sp, NULL);
10404 /* now call the string ctor */
10405 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10407 if (cmethod->klass->valuetype) {
10408 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10409 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10410 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10415 * The code generated by mini_emit_virtual_call () expects
10416 * iargs [0] to be a boxed instance, but luckily the vcall
10417 * will be transformed into a normal call there.
10419 } else if (context_used) {
10420 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10423 MonoVTable *vtable = NULL;
10425 if (!cfg->compile_aot)
10426 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10427 CHECK_TYPELOAD (cmethod->klass);
10430 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10431 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10432 * As a workaround, we call class cctors before allocating objects.
10434 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10435 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10436 if (cfg->verbose_level > 2)
10437 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10438 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10441 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10444 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10447 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10449 /* Now call the actual ctor */
10450 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10451 CHECK_CFG_EXCEPTION;
10454 if (alloc == NULL) {
10456 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10457 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10465 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10466 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10469 case CEE_CASTCLASS:
10473 token = read32 (ip + 1);
10474 klass = mini_get_class (method, token, generic_context);
10475 CHECK_TYPELOAD (klass);
10476 if (sp [0]->type != STACK_OBJ)
10479 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10480 CHECK_CFG_EXCEPTION;
10489 token = read32 (ip + 1);
10490 klass = mini_get_class (method, token, generic_context);
10491 CHECK_TYPELOAD (klass);
10492 if (sp [0]->type != STACK_OBJ)
10495 context_used = mini_class_check_context_used (cfg, klass);
10497 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10498 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10499 MonoInst *args [3];
10505 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10508 if (cfg->compile_aot)
10509 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10511 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10513 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10516 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10517 MonoMethod *mono_isinst;
10518 MonoInst *iargs [1];
10521 mono_isinst = mono_marshal_get_isinst (klass);
10522 iargs [0] = sp [0];
10524 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10525 iargs, ip, cfg->real_offset, TRUE, &bblock);
10526 CHECK_CFG_EXCEPTION;
10527 g_assert (costs > 0);
10530 cfg->real_offset += 5;
10534 inline_costs += costs;
10537 ins = handle_isinst (cfg, klass, *sp, context_used);
10538 CHECK_CFG_EXCEPTION;
10545 case CEE_UNBOX_ANY: {
10546 MonoInst *res, *addr;
10551 token = read32 (ip + 1);
10552 klass = mini_get_class (method, token, generic_context);
10553 CHECK_TYPELOAD (klass);
10555 mono_save_token_info (cfg, image, token, klass);
10557 context_used = mini_class_check_context_used (cfg, klass);
10559 if (mini_is_gsharedvt_klass (cfg, klass)) {
10560 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10562 } else if (generic_class_is_reference_type (cfg, klass)) {
10563 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10564 CHECK_CFG_EXCEPTION;
10565 } else if (mono_class_is_nullable (klass)) {
10566 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10568 addr = handle_unbox (cfg, klass, sp, context_used);
10570 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10581 MonoClass *enum_class;
10582 MonoMethod *has_flag;
10588 token = read32 (ip + 1);
10589 klass = mini_get_class (method, token, generic_context);
10590 CHECK_TYPELOAD (klass);
10592 mono_save_token_info (cfg, image, token, klass);
10594 context_used = mini_class_check_context_used (cfg, klass);
10596 if (generic_class_is_reference_type (cfg, klass)) {
10602 if (klass == mono_defaults.void_class)
10604 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10606 /* frequent check in generic code: box (struct), brtrue */
10611 * <push int/long ptr>
10614 * constrained. MyFlags
10615 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10617 * If we find this sequence and the operand types on box and constrained
10618 * are equal, we can emit a specialized instruction sequence instead of
10619 * the very slow HasFlag () call.
10621 if ((cfg->opt & MONO_OPT_INTRINS) &&
10622 /* Cheap checks first. */
10623 ip + 5 + 6 + 5 < end &&
10624 ip [5] == CEE_PREFIX1 &&
10625 ip [6] == CEE_CONSTRAINED_ &&
10626 ip [11] == CEE_CALLVIRT &&
10627 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10628 mono_class_is_enum (klass) &&
10629 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10630 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10631 has_flag->klass == mono_defaults.enum_class &&
10632 !strcmp (has_flag->name, "HasFlag") &&
10633 has_flag->signature->hasthis &&
10634 has_flag->signature->param_count == 1) {
10635 CHECK_TYPELOAD (enum_class);
10637 if (enum_class == klass) {
10638 MonoInst *enum_this, *enum_flag;
10643 enum_this = sp [0];
10644 enum_flag = sp [1];
10646 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10651 // FIXME: LLVM can't handle the inconsistent bb linking
10652 if (!mono_class_is_nullable (klass) &&
10653 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10654 (ip [5] == CEE_BRTRUE ||
10655 ip [5] == CEE_BRTRUE_S ||
10656 ip [5] == CEE_BRFALSE ||
10657 ip [5] == CEE_BRFALSE_S)) {
10658 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10660 MonoBasicBlock *true_bb, *false_bb;
10664 if (cfg->verbose_level > 3) {
10665 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10666 printf ("<box+brtrue opt>\n");
10671 case CEE_BRFALSE_S:
10674 target = ip + 1 + (signed char)(*ip);
10681 target = ip + 4 + (gint)(read32 (ip));
10685 g_assert_not_reached ();
10689 * We need to link both bblocks, since it is needed for handling stack
10690 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10691 * Branching to only one of them would lead to inconsistencies, so
10692 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10694 GET_BBLOCK (cfg, true_bb, target);
10695 GET_BBLOCK (cfg, false_bb, ip);
10697 mono_link_bblock (cfg, cfg->cbb, true_bb);
10698 mono_link_bblock (cfg, cfg->cbb, false_bb);
10700 if (sp != stack_start) {
10701 handle_stack_args (cfg, stack_start, sp - stack_start);
10703 CHECK_UNVERIFIABLE (cfg);
10706 if (COMPILE_LLVM (cfg)) {
10707 dreg = alloc_ireg (cfg);
10708 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10711 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10713 /* The JIT can't eliminate the iconst+compare */
10714 MONO_INST_NEW (cfg, ins, OP_BR);
10715 ins->inst_target_bb = is_true ? true_bb : false_bb;
10716 MONO_ADD_INS (cfg->cbb, ins);
10719 start_new_bblock = 1;
10723 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10725 CHECK_CFG_EXCEPTION;
10734 token = read32 (ip + 1);
10735 klass = mini_get_class (method, token, generic_context);
10736 CHECK_TYPELOAD (klass);
10738 mono_save_token_info (cfg, image, token, klass);
10740 context_used = mini_class_check_context_used (cfg, klass);
10742 if (mono_class_is_nullable (klass)) {
10745 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10746 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10750 ins = handle_unbox (cfg, klass, sp, context_used);
10763 MonoClassField *field;
10764 #ifndef DISABLE_REMOTING
10768 gboolean is_instance;
10770 gpointer addr = NULL;
10771 gboolean is_special_static;
10773 MonoInst *store_val = NULL;
10774 MonoInst *thread_ins;
10777 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10779 if (op == CEE_STFLD) {
10782 store_val = sp [1];
10787 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10789 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10792 if (op == CEE_STSFLD) {
10795 store_val = sp [0];
10800 token = read32 (ip + 1);
10801 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10802 field = mono_method_get_wrapper_data (method, token);
10803 klass = field->parent;
10806 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10809 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10810 FIELD_ACCESS_FAILURE (method, field);
10811 mono_class_init (klass);
10813 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10816 /* if the class is Critical then transparent code cannot access its fields */
10817 if (!is_instance && mono_security_core_clr_enabled ())
10818 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10820 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10821 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10822 if (mono_security_core_clr_enabled ())
10823 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10827 * LDFLD etc. is usable on static fields as well, so convert those cases to
10830 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10842 g_assert_not_reached ();
10844 is_instance = FALSE;
10847 context_used = mini_class_check_context_used (cfg, klass);
10849 /* INSTANCE CASE */
10851 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10852 if (op == CEE_STFLD) {
10853 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10855 #ifndef DISABLE_REMOTING
10856 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10857 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10858 MonoInst *iargs [5];
10860 GSHAREDVT_FAILURE (op);
10862 iargs [0] = sp [0];
10863 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10864 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10865 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10867 iargs [4] = sp [1];
10869 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10870 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10871 iargs, ip, cfg->real_offset, TRUE, &bblock);
10872 CHECK_CFG_EXCEPTION;
10873 g_assert (costs > 0);
10875 cfg->real_offset += 5;
10877 inline_costs += costs;
10879 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10886 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10888 if (mini_is_gsharedvt_klass (cfg, klass)) {
10889 MonoInst *offset_ins;
10891 context_used = mini_class_check_context_used (cfg, klass);
10893 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10894 dreg = alloc_ireg_mp (cfg);
10895 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10896 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10897 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10899 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10901 if (sp [0]->opcode != OP_LDADDR)
10902 store->flags |= MONO_INST_FAULT;
10904 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10905 /* insert call to write barrier */
10909 dreg = alloc_ireg_mp (cfg);
10910 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10911 emit_write_barrier (cfg, ptr, sp [1]);
10914 store->flags |= ins_flag;
10921 #ifndef DISABLE_REMOTING
10922 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10923 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10924 MonoInst *iargs [4];
10926 GSHAREDVT_FAILURE (op);
10928 iargs [0] = sp [0];
10929 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10930 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10931 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10932 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10933 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10934 iargs, ip, cfg->real_offset, TRUE, &bblock);
10935 CHECK_CFG_EXCEPTION;
10936 g_assert (costs > 0);
10938 cfg->real_offset += 5;
10942 inline_costs += costs;
10944 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10950 if (sp [0]->type == STACK_VTYPE) {
10953 /* Have to compute the address of the variable */
10955 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10957 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10959 g_assert (var->klass == klass);
10961 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10965 if (op == CEE_LDFLDA) {
10966 if (is_magic_tls_access (field)) {
10967 GSHAREDVT_FAILURE (*ip);
10969 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10971 if (sp [0]->type == STACK_OBJ) {
10972 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10973 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10976 dreg = alloc_ireg_mp (cfg);
10978 if (mini_is_gsharedvt_klass (cfg, klass)) {
10979 MonoInst *offset_ins;
10981 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10982 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10984 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10986 ins->klass = mono_class_from_mono_type (field->type);
10987 ins->type = STACK_MP;
10993 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10995 if (mini_is_gsharedvt_klass (cfg, klass)) {
10996 MonoInst *offset_ins;
10998 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10999 dreg = alloc_ireg_mp (cfg);
11000 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11001 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11003 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11005 load->flags |= ins_flag;
11006 if (sp [0]->opcode != OP_LDADDR)
11007 load->flags |= MONO_INST_FAULT;
11021 * We can only support shared generic static
11022 * field access on architectures where the
11023 * trampoline code has been extended to handle
11024 * the generic class init.
11026 #ifndef MONO_ARCH_VTABLE_REG
11027 GENERIC_SHARING_FAILURE (op);
11030 context_used = mini_class_check_context_used (cfg, klass);
11032 ftype = mono_field_get_type (field);
11034 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11037 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11038 * to be called here.
11040 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11041 mono_class_vtable (cfg->domain, klass);
11042 CHECK_TYPELOAD (klass);
11044 mono_domain_lock (cfg->domain);
11045 if (cfg->domain->special_static_fields)
11046 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11047 mono_domain_unlock (cfg->domain);
11049 is_special_static = mono_class_field_is_special_static (field);
11051 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11052 thread_ins = mono_get_thread_intrinsic (cfg);
11056 /* Generate IR to compute the field address */
11057 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11059 * Fast access to TLS data
11060 * Inline version of get_thread_static_data () in
11064 int idx, static_data_reg, array_reg, dreg;
11066 GSHAREDVT_FAILURE (op);
11068 // offset &= 0x7fffffff;
11069 // idx = (offset >> 24) - 1;
11070 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11071 MONO_ADD_INS (cfg->cbb, thread_ins);
11072 static_data_reg = alloc_ireg (cfg);
11073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11075 if (cfg->compile_aot) {
11076 int offset_reg, offset2_reg, idx_reg;
11078 /* For TLS variables, this will return the TLS offset */
11079 EMIT_NEW_SFLDACONST (cfg, ins, field);
11080 offset_reg = ins->dreg;
11081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11082 idx_reg = alloc_ireg (cfg);
11083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11086 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11087 array_reg = alloc_ireg (cfg);
11088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11089 offset2_reg = alloc_ireg (cfg);
11090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11091 dreg = alloc_ireg (cfg);
11092 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11094 offset = (gsize)addr & 0x7fffffff;
11095 idx = (offset >> 24) - 1;
11097 array_reg = alloc_ireg (cfg);
11098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11099 dreg = alloc_ireg (cfg);
11100 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11102 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11103 (cfg->compile_aot && is_special_static) ||
11104 (context_used && is_special_static)) {
11105 MonoInst *iargs [2];
11107 g_assert (field->parent);
11108 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11109 if (context_used) {
11110 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11111 field, MONO_RGCTX_INFO_CLASS_FIELD);
11113 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11115 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11116 } else if (context_used) {
11117 MonoInst *static_data;
11120 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11121 method->klass->name_space, method->klass->name, method->name,
11122 depth, field->offset);
11125 if (mono_class_needs_cctor_run (klass, method))
11126 emit_generic_class_init (cfg, klass);
11129 * The pointer we're computing here is
11131 * super_info.static_data + field->offset
11133 static_data = emit_get_rgctx_klass (cfg, context_used,
11134 klass, MONO_RGCTX_INFO_STATIC_DATA);
11136 if (mini_is_gsharedvt_klass (cfg, klass)) {
11137 MonoInst *offset_ins;
11139 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11140 dreg = alloc_ireg_mp (cfg);
11141 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11142 } else if (field->offset == 0) {
11145 int addr_reg = mono_alloc_preg (cfg);
11146 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11148 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11149 MonoInst *iargs [2];
11151 g_assert (field->parent);
11152 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11153 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11154 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11156 MonoVTable *vtable = NULL;
11158 if (!cfg->compile_aot)
11159 vtable = mono_class_vtable (cfg->domain, klass);
11160 CHECK_TYPELOAD (klass);
11163 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11164 if (!(g_slist_find (class_inits, klass))) {
11165 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11166 if (cfg->verbose_level > 2)
11167 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11168 class_inits = g_slist_prepend (class_inits, klass);
11171 if (cfg->run_cctors) {
11173 /* This makes it so that inlining cannot trigger */
11174 /* .cctors: too many apps depend on them */
11175 /* running with a specific order... */
11177 if (! vtable->initialized)
11178 INLINE_FAILURE ("class init");
11179 ex = mono_runtime_class_init_full (vtable, FALSE);
11181 set_exception_object (cfg, ex);
11182 goto exception_exit;
11186 if (cfg->compile_aot)
11187 EMIT_NEW_SFLDACONST (cfg, ins, field);
11190 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11192 EMIT_NEW_PCONST (cfg, ins, addr);
11195 MonoInst *iargs [1];
11196 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11197 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11201 /* Generate IR to do the actual load/store operation */
11203 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11204 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11205 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11208 if (op == CEE_LDSFLDA) {
11209 ins->klass = mono_class_from_mono_type (ftype);
11210 ins->type = STACK_PTR;
11212 } else if (op == CEE_STSFLD) {
11215 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11216 store->flags |= ins_flag;
11218 gboolean is_const = FALSE;
11219 MonoVTable *vtable = NULL;
11220 gpointer addr = NULL;
11222 if (!context_used) {
11223 vtable = mono_class_vtable (cfg->domain, klass);
11224 CHECK_TYPELOAD (klass);
11226 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11227 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11228 int ro_type = ftype->type;
11230 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11231 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11232 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11235 GSHAREDVT_FAILURE (op);
11237 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11240 case MONO_TYPE_BOOLEAN:
11242 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11246 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11249 case MONO_TYPE_CHAR:
11251 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11255 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11260 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11264 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11269 case MONO_TYPE_PTR:
11270 case MONO_TYPE_FNPTR:
11271 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11272 type_to_eval_stack_type ((cfg), field->type, *sp);
11275 case MONO_TYPE_STRING:
11276 case MONO_TYPE_OBJECT:
11277 case MONO_TYPE_CLASS:
11278 case MONO_TYPE_SZARRAY:
11279 case MONO_TYPE_ARRAY:
11280 if (!mono_gc_is_moving ()) {
11281 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11282 type_to_eval_stack_type ((cfg), field->type, *sp);
11290 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11295 case MONO_TYPE_VALUETYPE:
11305 CHECK_STACK_OVF (1);
11307 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11308 load->flags |= ins_flag;
11314 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11315 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11316 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11327 token = read32 (ip + 1);
11328 klass = mini_get_class (method, token, generic_context);
11329 CHECK_TYPELOAD (klass);
11330 if (ins_flag & MONO_INST_VOLATILE) {
11331 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11332 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11334 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11335 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11336 ins->flags |= ins_flag;
11337 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11338 generic_class_is_reference_type (cfg, klass)) {
11339 /* insert call to write barrier */
11340 emit_write_barrier (cfg, sp [0], sp [1]);
11352 const char *data_ptr;
11354 guint32 field_token;
11360 token = read32 (ip + 1);
11362 klass = mini_get_class (method, token, generic_context);
11363 CHECK_TYPELOAD (klass);
11365 context_used = mini_class_check_context_used (cfg, klass);
11367 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11368 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11369 ins->sreg1 = sp [0]->dreg;
11370 ins->type = STACK_I4;
11371 ins->dreg = alloc_ireg (cfg);
11372 MONO_ADD_INS (cfg->cbb, ins);
11373 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11376 if (context_used) {
11377 MonoInst *args [3];
11378 MonoClass *array_class = mono_array_class_get (klass, 1);
11379 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11381 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11384 args [0] = emit_get_rgctx_klass (cfg, context_used,
11385 array_class, MONO_RGCTX_INFO_VTABLE);
11390 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11392 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11394 if (cfg->opt & MONO_OPT_SHARED) {
11395 /* Decompose now to avoid problems with references to the domainvar */
11396 MonoInst *iargs [3];
11398 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11399 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11400 iargs [2] = sp [0];
11402 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11404 /* Decompose later since it is needed by abcrem */
11405 MonoClass *array_type = mono_array_class_get (klass, 1);
11406 mono_class_vtable (cfg->domain, array_type);
11407 CHECK_TYPELOAD (array_type);
11409 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11410 ins->dreg = alloc_ireg_ref (cfg);
11411 ins->sreg1 = sp [0]->dreg;
11412 ins->inst_newa_class = klass;
11413 ins->type = STACK_OBJ;
11414 ins->klass = array_type;
11415 MONO_ADD_INS (cfg->cbb, ins);
11416 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11417 cfg->cbb->has_array_access = TRUE;
11419 /* Needed so mono_emit_load_get_addr () gets called */
11420 mono_get_got_var (cfg);
11430 * we inline/optimize the initialization sequence if possible.
11431 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11432 * for small sizes open code the memcpy
11433 * ensure the rva field is big enough
11435 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11436 MonoMethod *memcpy_method = get_memcpy_method ();
11437 MonoInst *iargs [3];
11438 int add_reg = alloc_ireg_mp (cfg);
11440 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11441 if (cfg->compile_aot) {
11442 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11444 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11446 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11447 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11456 if (sp [0]->type != STACK_OBJ)
11459 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11460 ins->dreg = alloc_preg (cfg);
11461 ins->sreg1 = sp [0]->dreg;
11462 ins->type = STACK_I4;
11463 /* This flag will be inherited by the decomposition */
11464 ins->flags |= MONO_INST_FAULT;
11465 MONO_ADD_INS (cfg->cbb, ins);
11466 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11467 cfg->cbb->has_array_access = TRUE;
11475 if (sp [0]->type != STACK_OBJ)
11478 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11480 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11481 CHECK_TYPELOAD (klass);
11482 /* we need to make sure that this array is exactly the type it needs
11483 * to be for correctness. the wrappers are lax with their usage
11484 * so we need to ignore them here
11486 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11487 MonoClass *array_class = mono_array_class_get (klass, 1);
11488 mini_emit_check_array_type (cfg, sp [0], array_class);
11489 CHECK_TYPELOAD (array_class);
11493 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11498 case CEE_LDELEM_I1:
11499 case CEE_LDELEM_U1:
11500 case CEE_LDELEM_I2:
11501 case CEE_LDELEM_U2:
11502 case CEE_LDELEM_I4:
11503 case CEE_LDELEM_U4:
11504 case CEE_LDELEM_I8:
11506 case CEE_LDELEM_R4:
11507 case CEE_LDELEM_R8:
11508 case CEE_LDELEM_REF: {
11514 if (*ip == CEE_LDELEM) {
11516 token = read32 (ip + 1);
11517 klass = mini_get_class (method, token, generic_context);
11518 CHECK_TYPELOAD (klass);
11519 mono_class_init (klass);
11522 klass = array_access_to_klass (*ip);
11524 if (sp [0]->type != STACK_OBJ)
11527 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11529 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11530 // FIXME-VT: OP_ICONST optimization
11531 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11532 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11533 ins->opcode = OP_LOADV_MEMBASE;
11534 } else if (sp [1]->opcode == OP_ICONST) {
11535 int array_reg = sp [0]->dreg;
11536 int index_reg = sp [1]->dreg;
11537 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11539 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11540 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11542 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11543 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11546 if (*ip == CEE_LDELEM)
11553 case CEE_STELEM_I1:
11554 case CEE_STELEM_I2:
11555 case CEE_STELEM_I4:
11556 case CEE_STELEM_I8:
11557 case CEE_STELEM_R4:
11558 case CEE_STELEM_R8:
11559 case CEE_STELEM_REF:
11564 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11566 if (*ip == CEE_STELEM) {
11568 token = read32 (ip + 1);
11569 klass = mini_get_class (method, token, generic_context);
11570 CHECK_TYPELOAD (klass);
11571 mono_class_init (klass);
11574 klass = array_access_to_klass (*ip);
11576 if (sp [0]->type != STACK_OBJ)
11579 emit_array_store (cfg, klass, sp, TRUE);
11581 if (*ip == CEE_STELEM)
11588 case CEE_CKFINITE: {
11592 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11593 ins->sreg1 = sp [0]->dreg;
11594 ins->dreg = alloc_freg (cfg);
11595 ins->type = STACK_R8;
11596 MONO_ADD_INS (bblock, ins);
11598 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11603 case CEE_REFANYVAL: {
11604 MonoInst *src_var, *src;
11606 int klass_reg = alloc_preg (cfg);
11607 int dreg = alloc_preg (cfg);
11609 GSHAREDVT_FAILURE (*ip);
11612 MONO_INST_NEW (cfg, ins, *ip);
11615 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11616 CHECK_TYPELOAD (klass);
11618 context_used = mini_class_check_context_used (cfg, klass);
11621 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11623 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11624 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11627 if (context_used) {
11628 MonoInst *klass_ins;
11630 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11631 klass, MONO_RGCTX_INFO_KLASS);
11634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11635 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11637 mini_emit_class_check (cfg, klass_reg, klass);
11639 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11640 ins->type = STACK_MP;
11645 case CEE_MKREFANY: {
11646 MonoInst *loc, *addr;
11648 GSHAREDVT_FAILURE (*ip);
11651 MONO_INST_NEW (cfg, ins, *ip);
11654 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11655 CHECK_TYPELOAD (klass);
11657 context_used = mini_class_check_context_used (cfg, klass);
11659 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11660 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11662 if (context_used) {
11663 MonoInst *const_ins;
11664 int type_reg = alloc_preg (cfg);
11666 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11670 } else if (cfg->compile_aot) {
11671 int const_reg = alloc_preg (cfg);
11672 int type_reg = alloc_preg (cfg);
11674 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11679 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11680 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11684 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11685 ins->type = STACK_VTYPE;
11686 ins->klass = mono_defaults.typed_reference_class;
11691 case CEE_LDTOKEN: {
11693 MonoClass *handle_class;
11695 CHECK_STACK_OVF (1);
11698 n = read32 (ip + 1);
11700 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11701 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11702 handle = mono_method_get_wrapper_data (method, n);
11703 handle_class = mono_method_get_wrapper_data (method, n + 1);
11704 if (handle_class == mono_defaults.typehandle_class)
11705 handle = &((MonoClass*)handle)->byval_arg;
11708 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11713 mono_class_init (handle_class);
11714 if (cfg->generic_sharing_context) {
11715 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11716 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11717 /* This case handles ldtoken
11718 of an open type, like for
11721 } else if (handle_class == mono_defaults.typehandle_class) {
11722 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11723 } else if (handle_class == mono_defaults.fieldhandle_class)
11724 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11725 else if (handle_class == mono_defaults.methodhandle_class)
11726 context_used = mini_method_check_context_used (cfg, handle);
11728 g_assert_not_reached ();
11731 if ((cfg->opt & MONO_OPT_SHARED) &&
11732 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11733 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11734 MonoInst *addr, *vtvar, *iargs [3];
11735 int method_context_used;
11737 method_context_used = mini_method_check_context_used (cfg, method);
11739 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11741 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11742 EMIT_NEW_ICONST (cfg, iargs [1], n);
11743 if (method_context_used) {
11744 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11745 method, MONO_RGCTX_INFO_METHOD);
11746 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11748 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11749 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11751 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11755 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11757 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11758 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11759 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11760 (cmethod->klass == mono_defaults.systemtype_class) &&
11761 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11762 MonoClass *tclass = mono_class_from_mono_type (handle);
11764 mono_class_init (tclass);
11765 if (context_used) {
11766 ins = emit_get_rgctx_klass (cfg, context_used,
11767 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11768 } else if (cfg->compile_aot) {
11769 if (method->wrapper_type) {
11770 mono_error_init (&error); //got to do it since there are multiple conditionals below
11771 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11772 /* Special case for static synchronized wrappers */
11773 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11775 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11776 /* FIXME: n is not a normal token */
11778 EMIT_NEW_PCONST (cfg, ins, NULL);
11781 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11784 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11786 ins->type = STACK_OBJ;
11787 ins->klass = cmethod->klass;
11790 MonoInst *addr, *vtvar;
11792 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11794 if (context_used) {
11795 if (handle_class == mono_defaults.typehandle_class) {
11796 ins = emit_get_rgctx_klass (cfg, context_used,
11797 mono_class_from_mono_type (handle),
11798 MONO_RGCTX_INFO_TYPE);
11799 } else if (handle_class == mono_defaults.methodhandle_class) {
11800 ins = emit_get_rgctx_method (cfg, context_used,
11801 handle, MONO_RGCTX_INFO_METHOD);
11802 } else if (handle_class == mono_defaults.fieldhandle_class) {
11803 ins = emit_get_rgctx_field (cfg, context_used,
11804 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11806 g_assert_not_reached ();
11808 } else if (cfg->compile_aot) {
11809 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11811 EMIT_NEW_PCONST (cfg, ins, handle);
11813 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11815 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11825 MONO_INST_NEW (cfg, ins, OP_THROW);
11827 ins->sreg1 = sp [0]->dreg;
11829 bblock->out_of_line = TRUE;
11830 MONO_ADD_INS (bblock, ins);
11831 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11832 MONO_ADD_INS (bblock, ins);
11835 link_bblock (cfg, bblock, end_bblock);
11836 start_new_bblock = 1;
11838 case CEE_ENDFINALLY:
11839 /* mono_save_seq_point_info () depends on this */
11840 if (sp != stack_start)
11841 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11842 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11843 MONO_ADD_INS (bblock, ins);
11845 start_new_bblock = 1;
11848 * Control will leave the method so empty the stack, otherwise
11849 * the next basic block will start with a nonempty stack.
11851 while (sp != stack_start) {
11856 case CEE_LEAVE_S: {
11859 if (*ip == CEE_LEAVE) {
11861 target = ip + 5 + (gint32)read32(ip + 1);
11864 target = ip + 2 + (signed char)(ip [1]);
11867 /* empty the stack */
11868 while (sp != stack_start) {
11873 * If this leave statement is in a catch block, check for a
11874 * pending exception, and rethrow it if necessary.
11875 * We avoid doing this in runtime invoke wrappers, since those are called
11876 * by native code which expects the wrapper to catch all exceptions.
11878 for (i = 0; i < header->num_clauses; ++i) {
11879 MonoExceptionClause *clause = &header->clauses [i];
11882 * Use <= in the final comparison to handle clauses with multiple
11883 * leave statements, like in bug #78024.
11884 * The ordering of the exception clauses guarantees that we find the
11885 * innermost clause.
11887 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11889 MonoBasicBlock *dont_throw;
11894 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11897 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11899 NEW_BBLOCK (cfg, dont_throw);
11902 * Currently, we always rethrow the abort exception, despite the
11903 * fact that this is not correct. See thread6.cs for an example.
11904 * But propagating the abort exception is more important than
11905 * getting the semantics right.
11907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11909 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11911 MONO_START_BB (cfg, dont_throw);
11916 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11918 MonoExceptionClause *clause;
11920 for (tmp = handlers; tmp; tmp = tmp->next) {
11921 clause = tmp->data;
11922 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11924 link_bblock (cfg, bblock, tblock);
11925 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11926 ins->inst_target_bb = tblock;
11927 ins->inst_eh_block = clause;
11928 MONO_ADD_INS (bblock, ins);
11929 bblock->has_call_handler = 1;
11930 if (COMPILE_LLVM (cfg)) {
11931 MonoBasicBlock *target_bb;
11934 * Link the finally bblock with the target, since it will
11935 * conceptually branch there.
11936 * FIXME: Have to link the bblock containing the endfinally.
11938 GET_BBLOCK (cfg, target_bb, target);
11939 link_bblock (cfg, tblock, target_bb);
11942 g_list_free (handlers);
11945 MONO_INST_NEW (cfg, ins, OP_BR);
11946 MONO_ADD_INS (bblock, ins);
11947 GET_BBLOCK (cfg, tblock, target);
11948 link_bblock (cfg, bblock, tblock);
11949 ins->inst_target_bb = tblock;
11950 start_new_bblock = 1;
11952 if (*ip == CEE_LEAVE)
11961 * Mono specific opcodes
11963 case MONO_CUSTOM_PREFIX: {
11965 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11969 case CEE_MONO_ICALL: {
11971 MonoJitICallInfo *info;
11973 token = read32 (ip + 2);
11974 func = mono_method_get_wrapper_data (method, token);
11975 info = mono_find_jit_icall_by_addr (func);
11977 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11980 CHECK_STACK (info->sig->param_count);
11981 sp -= info->sig->param_count;
11983 ins = mono_emit_jit_icall (cfg, info->func, sp);
11984 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11988 inline_costs += 10 * num_calls++;
11992 case CEE_MONO_LDPTR: {
11995 CHECK_STACK_OVF (1);
11997 token = read32 (ip + 2);
11999 ptr = mono_method_get_wrapper_data (method, token);
12000 /* FIXME: Generalize this */
12001 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
12002 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12007 EMIT_NEW_PCONST (cfg, ins, ptr);
12010 inline_costs += 10 * num_calls++;
12011 /* Can't embed random pointers into AOT code */
12015 case CEE_MONO_JIT_ICALL_ADDR: {
12016 MonoJitICallInfo *callinfo;
12019 CHECK_STACK_OVF (1);
12021 token = read32 (ip + 2);
12023 ptr = mono_method_get_wrapper_data (method, token);
12024 callinfo = mono_find_jit_icall_by_addr (ptr);
12025 g_assert (callinfo);
12026 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12029 inline_costs += 10 * num_calls++;
12032 case CEE_MONO_ICALL_ADDR: {
12033 MonoMethod *cmethod;
12036 CHECK_STACK_OVF (1);
12038 token = read32 (ip + 2);
12040 cmethod = mono_method_get_wrapper_data (method, token);
12042 if (cfg->compile_aot) {
12043 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12045 ptr = mono_lookup_internal_call (cmethod);
12047 EMIT_NEW_PCONST (cfg, ins, ptr);
12053 case CEE_MONO_VTADDR: {
12054 MonoInst *src_var, *src;
12060 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12061 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12066 case CEE_MONO_NEWOBJ: {
12067 MonoInst *iargs [2];
12069 CHECK_STACK_OVF (1);
12071 token = read32 (ip + 2);
12072 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12073 mono_class_init (klass);
12074 NEW_DOMAINCONST (cfg, iargs [0]);
12075 MONO_ADD_INS (cfg->cbb, iargs [0]);
12076 NEW_CLASSCONST (cfg, iargs [1], klass);
12077 MONO_ADD_INS (cfg->cbb, iargs [1]);
12078 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12080 inline_costs += 10 * num_calls++;
12083 case CEE_MONO_OBJADDR:
12086 MONO_INST_NEW (cfg, ins, OP_MOVE);
12087 ins->dreg = alloc_ireg_mp (cfg);
12088 ins->sreg1 = sp [0]->dreg;
12089 ins->type = STACK_MP;
12090 MONO_ADD_INS (cfg->cbb, ins);
12094 case CEE_MONO_LDNATIVEOBJ:
12096 * Similar to LDOBJ, but instead load the unmanaged
12097 * representation of the vtype to the stack.
12102 token = read32 (ip + 2);
12103 klass = mono_method_get_wrapper_data (method, token);
12104 g_assert (klass->valuetype);
12105 mono_class_init (klass);
12108 MonoInst *src, *dest, *temp;
12111 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12112 temp->backend.is_pinvoke = 1;
12113 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12114 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12116 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12117 dest->type = STACK_VTYPE;
12118 dest->klass = klass;
12124 case CEE_MONO_RETOBJ: {
12126 * Same as RET, but return the native representation of a vtype
12129 g_assert (cfg->ret);
12130 g_assert (mono_method_signature (method)->pinvoke);
12135 token = read32 (ip + 2);
12136 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12138 if (!cfg->vret_addr) {
12139 g_assert (cfg->ret_var_is_local);
12141 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12143 EMIT_NEW_RETLOADA (cfg, ins);
12145 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12147 if (sp != stack_start)
12150 MONO_INST_NEW (cfg, ins, OP_BR);
12151 ins->inst_target_bb = end_bblock;
12152 MONO_ADD_INS (bblock, ins);
12153 link_bblock (cfg, bblock, end_bblock);
12154 start_new_bblock = 1;
12158 case CEE_MONO_CISINST:
12159 case CEE_MONO_CCASTCLASS: {
12164 token = read32 (ip + 2);
12165 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12166 if (ip [1] == CEE_MONO_CISINST)
12167 ins = handle_cisinst (cfg, klass, sp [0]);
12169 ins = handle_ccastclass (cfg, klass, sp [0]);
12175 case CEE_MONO_SAVE_LMF:
12176 case CEE_MONO_RESTORE_LMF:
12177 #ifdef MONO_ARCH_HAVE_LMF_OPS
12178 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12179 MONO_ADD_INS (bblock, ins);
12180 cfg->need_lmf_area = TRUE;
12184 case CEE_MONO_CLASSCONST:
12185 CHECK_STACK_OVF (1);
12187 token = read32 (ip + 2);
12188 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12191 inline_costs += 10 * num_calls++;
12193 case CEE_MONO_NOT_TAKEN:
12194 bblock->out_of_line = TRUE;
12197 case CEE_MONO_TLS: {
12200 CHECK_STACK_OVF (1);
12202 key = (gint32)read32 (ip + 2);
12203 g_assert (key < TLS_KEY_NUM);
12205 ins = mono_create_tls_get (cfg, key);
12207 if (cfg->compile_aot) {
12209 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12210 ins->dreg = alloc_preg (cfg);
12211 ins->type = STACK_PTR;
12213 g_assert_not_reached ();
12216 ins->type = STACK_PTR;
12217 MONO_ADD_INS (bblock, ins);
12222 case CEE_MONO_DYN_CALL: {
12223 MonoCallInst *call;
12225 /* It would be easier to call a trampoline, but that would put an
12226 * extra frame on the stack, confusing exception handling. So
12227 * implement it inline using an opcode for now.
12230 if (!cfg->dyn_call_var) {
12231 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12232 /* prevent it from being register allocated */
12233 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12236 /* Has to use a call inst since local regalloc expects it */
12237 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12238 ins = (MonoInst*)call;
12240 ins->sreg1 = sp [0]->dreg;
12241 ins->sreg2 = sp [1]->dreg;
12242 MONO_ADD_INS (bblock, ins);
12244 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12247 inline_costs += 10 * num_calls++;
12251 case CEE_MONO_MEMORY_BARRIER: {
12253 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12257 case CEE_MONO_JIT_ATTACH: {
12258 MonoInst *args [16], *domain_ins;
12259 MonoInst *ad_ins, *jit_tls_ins;
12260 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12262 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12264 EMIT_NEW_PCONST (cfg, ins, NULL);
12265 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12267 ad_ins = mono_get_domain_intrinsic (cfg);
12268 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12270 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12271 NEW_BBLOCK (cfg, next_bb);
12272 NEW_BBLOCK (cfg, call_bb);
12274 if (cfg->compile_aot) {
12275 /* AOT code is only used in the root domain */
12276 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12278 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12280 MONO_ADD_INS (cfg->cbb, ad_ins);
12281 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12284 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12288 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12289 MONO_START_BB (cfg, call_bb);
12292 if (cfg->compile_aot) {
12293 /* AOT code is only used in the root domain */
12294 EMIT_NEW_PCONST (cfg, args [0], NULL);
12296 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12298 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12299 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12302 MONO_START_BB (cfg, next_bb);
12308 case CEE_MONO_JIT_DETACH: {
12309 MonoInst *args [16];
12311 /* Restore the original domain */
12312 dreg = alloc_ireg (cfg);
12313 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12314 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12319 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12325 case CEE_PREFIX1: {
12328 case CEE_ARGLIST: {
12329 /* somewhat similar to LDTOKEN */
12330 MonoInst *addr, *vtvar;
12331 CHECK_STACK_OVF (1);
12332 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12334 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12335 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12337 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12338 ins->type = STACK_VTYPE;
12339 ins->klass = mono_defaults.argumenthandle_class;
12349 MonoInst *cmp, *arg1, *arg2;
12357 * The following transforms:
12358 * CEE_CEQ into OP_CEQ
12359 * CEE_CGT into OP_CGT
12360 * CEE_CGT_UN into OP_CGT_UN
12361 * CEE_CLT into OP_CLT
12362 * CEE_CLT_UN into OP_CLT_UN
12364 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12366 MONO_INST_NEW (cfg, ins, cmp->opcode);
12367 cmp->sreg1 = arg1->dreg;
12368 cmp->sreg2 = arg2->dreg;
12369 type_from_op (cfg, cmp, arg1, arg2);
12371 add_widen_op (cfg, cmp, &arg1, &arg2);
12372 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12373 cmp->opcode = OP_LCOMPARE;
12374 else if (arg1->type == STACK_R4)
12375 cmp->opcode = OP_RCOMPARE;
12376 else if (arg1->type == STACK_R8)
12377 cmp->opcode = OP_FCOMPARE;
12379 cmp->opcode = OP_ICOMPARE;
12380 MONO_ADD_INS (bblock, cmp);
12381 ins->type = STACK_I4;
12382 ins->dreg = alloc_dreg (cfg, ins->type);
12383 type_from_op (cfg, ins, arg1, arg2);
12385 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12387 * The backends expect the fceq opcodes to do the
12390 ins->sreg1 = cmp->sreg1;
12391 ins->sreg2 = cmp->sreg2;
12394 MONO_ADD_INS (bblock, ins);
12400 MonoInst *argconst;
12401 MonoMethod *cil_method;
12403 CHECK_STACK_OVF (1);
12405 n = read32 (ip + 2);
12406 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12407 if (!cmethod || mono_loader_get_last_error ())
12409 mono_class_init (cmethod->klass);
12411 mono_save_token_info (cfg, image, n, cmethod);
12413 context_used = mini_method_check_context_used (cfg, cmethod);
12415 cil_method = cmethod;
12416 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12417 METHOD_ACCESS_FAILURE (method, cil_method);
12419 if (mono_security_cas_enabled ()) {
12420 if (check_linkdemand (cfg, method, cmethod))
12421 INLINE_FAILURE ("linkdemand");
12422 CHECK_CFG_EXCEPTION;
12423 } else if (mono_security_core_clr_enabled ()) {
12424 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12428 * Optimize the common case of ldftn+delegate creation
12430 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12431 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12432 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12433 MonoInst *target_ins, *handle_ins;
12434 MonoMethod *invoke;
12435 int invoke_context_used;
12437 invoke = mono_get_delegate_invoke (ctor_method->klass);
12438 if (!invoke || !mono_method_signature (invoke))
12441 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12443 target_ins = sp [-1];
12445 if (mono_security_core_clr_enabled ())
12446 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12448 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12449 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12450 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12452 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12456 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12457 /* FIXME: SGEN support */
12458 if (invoke_context_used == 0) {
12460 if (cfg->verbose_level > 3)
12461 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12462 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12465 CHECK_CFG_EXCEPTION;
12476 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12477 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12481 inline_costs += 10 * num_calls++;
12484 case CEE_LDVIRTFTN: {
12485 MonoInst *args [2];
12489 n = read32 (ip + 2);
12490 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12491 if (!cmethod || mono_loader_get_last_error ())
12493 mono_class_init (cmethod->klass);
12495 context_used = mini_method_check_context_used (cfg, cmethod);
12497 if (mono_security_cas_enabled ()) {
12498 if (check_linkdemand (cfg, method, cmethod))
12499 INLINE_FAILURE ("linkdemand");
12500 CHECK_CFG_EXCEPTION;
12501 } else if (mono_security_core_clr_enabled ()) {
12502 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12506 * Optimize the common case of ldvirtftn+delegate creation
12508 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12509 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12510 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12511 MonoInst *target_ins, *handle_ins;
12512 MonoMethod *invoke;
12513 int invoke_context_used;
12515 invoke = mono_get_delegate_invoke (ctor_method->klass);
12516 if (!invoke || !mono_method_signature (invoke))
12519 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12521 target_ins = sp [-1];
12523 if (mono_security_core_clr_enabled ())
12524 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12526 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12527 /* FIXME: SGEN support */
12528 if (invoke_context_used == 0) {
12530 if (cfg->verbose_level > 3)
12531 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12532 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12535 CHECK_CFG_EXCEPTION;
12549 args [1] = emit_get_rgctx_method (cfg, context_used,
12550 cmethod, MONO_RGCTX_INFO_METHOD);
12553 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12555 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12558 inline_costs += 10 * num_calls++;
12562 CHECK_STACK_OVF (1);
12564 n = read16 (ip + 2);
12566 EMIT_NEW_ARGLOAD (cfg, ins, n);
12571 CHECK_STACK_OVF (1);
12573 n = read16 (ip + 2);
12575 NEW_ARGLOADA (cfg, ins, n);
12576 MONO_ADD_INS (cfg->cbb, ins);
12584 n = read16 (ip + 2);
12586 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12588 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12592 CHECK_STACK_OVF (1);
12594 n = read16 (ip + 2);
12596 EMIT_NEW_LOCLOAD (cfg, ins, n);
12601 unsigned char *tmp_ip;
12602 CHECK_STACK_OVF (1);
12604 n = read16 (ip + 2);
12607 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12613 EMIT_NEW_LOCLOADA (cfg, ins, n);
12622 n = read16 (ip + 2);
12624 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12626 emit_stloc_ir (cfg, sp, header, n);
12633 if (sp != stack_start)
12635 if (cfg->method != method)
12637 * Inlining this into a loop in a parent could lead to
12638 * stack overflows which is different behavior than the
12639 * non-inlined case, thus disable inlining in this case.
12641 INLINE_FAILURE("localloc");
12643 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12644 ins->dreg = alloc_preg (cfg);
12645 ins->sreg1 = sp [0]->dreg;
12646 ins->type = STACK_PTR;
12647 MONO_ADD_INS (cfg->cbb, ins);
12649 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12651 ins->flags |= MONO_INST_INIT;
12656 case CEE_ENDFILTER: {
12657 MonoExceptionClause *clause, *nearest;
12658 int cc, nearest_num;
12662 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12664 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12665 ins->sreg1 = (*sp)->dreg;
12666 MONO_ADD_INS (bblock, ins);
12667 start_new_bblock = 1;
12672 for (cc = 0; cc < header->num_clauses; ++cc) {
12673 clause = &header->clauses [cc];
12674 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12675 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12676 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12681 g_assert (nearest);
12682 if ((ip - header->code) != nearest->handler_offset)
12687 case CEE_UNALIGNED_:
12688 ins_flag |= MONO_INST_UNALIGNED;
12689 /* FIXME: record alignment? we can assume 1 for now */
12693 case CEE_VOLATILE_:
12694 ins_flag |= MONO_INST_VOLATILE;
12698 ins_flag |= MONO_INST_TAILCALL;
12699 cfg->flags |= MONO_CFG_HAS_TAIL;
12700 /* Can't inline tail calls at this time */
12701 inline_costs += 100000;
12708 token = read32 (ip + 2);
12709 klass = mini_get_class (method, token, generic_context);
12710 CHECK_TYPELOAD (klass);
12711 if (generic_class_is_reference_type (cfg, klass))
12712 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12714 mini_emit_initobj (cfg, *sp, NULL, klass);
12718 case CEE_CONSTRAINED_:
12720 token = read32 (ip + 2);
12721 constrained_class = mini_get_class (method, token, generic_context);
12722 CHECK_TYPELOAD (constrained_class);
12726 case CEE_INITBLK: {
12727 MonoInst *iargs [3];
12731 /* Skip optimized paths for volatile operations. */
12732 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12733 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12734 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12735 /* emit_memset only works when val == 0 */
12736 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12739 iargs [0] = sp [0];
12740 iargs [1] = sp [1];
12741 iargs [2] = sp [2];
12742 if (ip [1] == CEE_CPBLK) {
12744 * FIXME: It's unclear whether we should be emitting both the acquire
12745 * and release barriers for cpblk. It is technically both a load and
12746 * store operation, so it seems like that's the sensible thing to do.
12748 * FIXME: We emit full barriers on both sides of the operation for
12749 * simplicity. We should have a separate atomic memcpy method instead.
12751 MonoMethod *memcpy_method = get_memcpy_method ();
12753 if (ins_flag & MONO_INST_VOLATILE)
12754 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12756 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12757 call->flags |= ins_flag;
12759 if (ins_flag & MONO_INST_VOLATILE)
12760 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12762 MonoMethod *memset_method = get_memset_method ();
12763 if (ins_flag & MONO_INST_VOLATILE) {
12764 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12765 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12767 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12768 call->flags |= ins_flag;
12779 ins_flag |= MONO_INST_NOTYPECHECK;
12781 ins_flag |= MONO_INST_NORANGECHECK;
12782 /* we ignore the no-nullcheck for now since we
12783 * really do it explicitly only when doing callvirt->call
12787 case CEE_RETHROW: {
12789 int handler_offset = -1;
12791 for (i = 0; i < header->num_clauses; ++i) {
12792 MonoExceptionClause *clause = &header->clauses [i];
12793 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12794 handler_offset = clause->handler_offset;
12799 bblock->flags |= BB_EXCEPTION_UNSAFE;
12801 if (handler_offset == -1)
12804 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12805 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12806 ins->sreg1 = load->dreg;
12807 MONO_ADD_INS (bblock, ins);
12809 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12810 MONO_ADD_INS (bblock, ins);
12813 link_bblock (cfg, bblock, end_bblock);
12814 start_new_bblock = 1;
12822 CHECK_STACK_OVF (1);
12824 token = read32 (ip + 2);
12825 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12826 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12829 val = mono_type_size (type, &ialign);
12831 MonoClass *klass = mini_get_class (method, token, generic_context);
12832 CHECK_TYPELOAD (klass);
12834 val = mono_type_size (&klass->byval_arg, &ialign);
12836 if (mini_is_gsharedvt_klass (cfg, klass))
12837 GSHAREDVT_FAILURE (*ip);
12839 EMIT_NEW_ICONST (cfg, ins, val);
12844 case CEE_REFANYTYPE: {
12845 MonoInst *src_var, *src;
12847 GSHAREDVT_FAILURE (*ip);
12853 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12855 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12856 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12857 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12862 case CEE_READONLY_:
12875 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12885 g_warning ("opcode 0x%02x not handled", *ip);
12889 if (start_new_bblock != 1)
12892 bblock->cil_length = ip - bblock->cil_code;
12893 if (bblock->next_bb) {
12894 /* This could already be set because of inlining, #693905 */
12895 MonoBasicBlock *bb = bblock;
12897 while (bb->next_bb)
12899 bb->next_bb = end_bblock;
12901 bblock->next_bb = end_bblock;
12904 if (cfg->method == method && cfg->domainvar) {
12906 MonoInst *get_domain;
12908 cfg->cbb = init_localsbb;
12910 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12911 MONO_ADD_INS (cfg->cbb, get_domain);
12913 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12915 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12916 MONO_ADD_INS (cfg->cbb, store);
12919 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12920 if (cfg->compile_aot)
12921 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12922 mono_get_got_var (cfg);
12925 if (cfg->method == method && cfg->got_var)
12926 mono_emit_load_got_addr (cfg);
12928 if (init_localsbb) {
12929 cfg->cbb = init_localsbb;
12931 for (i = 0; i < header->num_locals; ++i) {
12932 emit_init_local (cfg, i, header->locals [i], init_locals);
12936 if (cfg->init_ref_vars && cfg->method == method) {
12937 /* Emit initialization for ref vars */
12938 // FIXME: Avoid duplication initialization for IL locals.
12939 for (i = 0; i < cfg->num_varinfo; ++i) {
12940 MonoInst *ins = cfg->varinfo [i];
12942 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12943 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12947 if (cfg->lmf_var && cfg->method == method) {
12948 cfg->cbb = init_localsbb;
12949 emit_push_lmf (cfg);
12952 cfg->cbb = init_localsbb;
12953 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12956 MonoBasicBlock *bb;
12959 * Make seq points at backward branch targets interruptable.
12961 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12962 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12963 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12966 /* Add a sequence point for method entry/exit events */
12967 if (cfg->gen_seq_points_debug_data) {
12968 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12969 MONO_ADD_INS (init_localsbb, ins);
12970 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12971 MONO_ADD_INS (cfg->bb_exit, ins);
12975 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12976 * the code they refer to was dead (#11880).
12978 if (sym_seq_points) {
12979 for (i = 0; i < header->code_size; ++i) {
12980 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12983 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12984 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12991 if (cfg->method == method) {
12992 MonoBasicBlock *bb;
12993 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12994 bb->region = mono_find_block_region (cfg, bb->real_offset);
12996 mono_create_spvar_for_region (cfg, bb->region);
12997 if (cfg->verbose_level > 2)
12998 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13002 if (inline_costs < 0) {
13005 /* Method is too large */
13006 mname = mono_method_full_name (method, TRUE);
13007 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13008 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13012 if ((cfg->verbose_level > 2) && (cfg->method == method))
13013 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13018 g_assert (!mono_error_ok (&cfg->error));
13022 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13026 set_exception_type_from_invalid_il (cfg, method, ip);
13030 g_slist_free (class_inits);
13031 mono_basic_block_free (original_bb);
13032 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13033 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13034 if (cfg->exception_type)
13037 return inline_costs;
13041 store_membase_reg_to_store_membase_imm (int opcode)
13044 case OP_STORE_MEMBASE_REG:
13045 return OP_STORE_MEMBASE_IMM;
13046 case OP_STOREI1_MEMBASE_REG:
13047 return OP_STOREI1_MEMBASE_IMM;
13048 case OP_STOREI2_MEMBASE_REG:
13049 return OP_STOREI2_MEMBASE_IMM;
13050 case OP_STOREI4_MEMBASE_REG:
13051 return OP_STOREI4_MEMBASE_IMM;
13052 case OP_STOREI8_MEMBASE_REG:
13053 return OP_STOREI8_MEMBASE_IMM;
13055 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/call opcode to the variant taking an
 * immediate second operand (e.g. OP_IADD -> OP_IADD_IMM), falling through to
 * a negative "no immediate form" result otherwise.
 *
 * NOTE(review): most 'case OP_x:' labels and the switch scaffolding appear
 * elided in this listing; each bare 'return OP_x_IMM;' presumably sits under
 * the matching 'case OP_x:' — confirm against the unelided source.
 */
mono_op_to_op_imm (int opcode)
		return OP_IADD_IMM;
		return OP_ISUB_IMM;
		return OP_IDIV_IMM;
		return OP_IDIV_UN_IMM;
		return OP_IREM_IMM;
		return OP_IREM_UN_IMM;
		return OP_IMUL_IMM;
		return OP_IAND_IMM;
		return OP_IXOR_IMM;
		return OP_ISHL_IMM;
		return OP_ISHR_IMM;
		return OP_ISHR_UN_IMM;
		/* 64-bit long variants */
		return OP_LADD_IMM;
		return OP_LSUB_IMM;
		return OP_LAND_IMM;
		return OP_LXOR_IMM;
		return OP_LSHL_IMM;
		return OP_LSHR_IMM;
		return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
		/* long rem only has a native immediate form on 64-bit registers */
		return OP_LREM_IMM;
		/* compares */
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	/* stores of a constant value */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* an indirect call through a constant address becomes a direct call */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode to the corresponding LOAD*_MEMBASE machine-IR
 * opcode.  Asserts on anything that is not an ldind opcode.
 *
 * NOTE(review): most 'case CEE_LDIND_*:' labels appear elided in this listing;
 * only CEE_LDIND_REF survives.  Each return presumably belongs to the matching
 * CEE_LDIND_{I1,U1,I2,U2,I4,U4,I,REF,I8,R4,R8} label.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_REF:
		/* object references use the native-word load */
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode to the corresponding STORE*_MEMBASE_REG
 * machine-IR opcode.  Asserts on anything that is not a stind opcode.
 * (NOTE(review): the CEE_STIND_{I1,I2,I4,I8,R4,R8} labels appear elided in
 * this listing; only CEE_STIND_REF survives.)
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
	case CEE_STIND_REF:
		/* object references use the native-word store */
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM variant,
 * usable when the base address is a constant.  Only provided on x86/amd64;
 * other targets presumably fall through to a "not supported" result
 * (the tail of the function is elided in this listing).
 */
mono_load_membase_to_load_mem (int opcode)
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 64-bit absolute loads only make sense with 64-bit registers */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU opcode into an x86/amd64 read-modify-write memory opcode when
 * its result is immediately stored back through STORE_MEMBASE (store_opcode).
 * Falls through to a "no fusion possible" result for unsupported store widths
 * or opcodes (the switch scaffolding and 'case OP_*:' labels are elided in
 * this listing; each return presumably matches the OP_{I,L}{ADD,SUB,AND,OR,XOR}
 * and *_IMM opcode its name suggests).
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
	/* only full-word stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	/* amd64 additionally fuses 64-bit stores */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		/* 64-bit (long) variants */
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode into an x86 SET*_MEMBASE opcode when the
 * result is immediately stored through a byte-sized store.  Returns a
 * negative value when no fused form exists.
 *
 * NOTE(review): the 'case' labels — presumably OP_ICEQ / OP_ICNEQ — are
 * elided in this listing, which is why the two guards below look identical;
 * confirm against the unelided source.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (store_opcode == OP_STOREI1_MEMBASE_REG)
		return OP_X86_SETEQ_MEMBASE;
	if (store_opcode == OP_STOREI1_MEMBASE_REG)
		return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load (load_opcode) feeding the FIRST source operand of 'opcode'
 * into a combined x86/amd64 memory-operand opcode, so the value is consumed
 * straight from memory instead of going through a register.  Returns a
 * negative value when no fused form exists.  (NOTE(review): switch headers,
 * several 'case' labels and #else/#endif lines are elided in this listing.)
 */
op_to_op_src1_membase (int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* x86: only 32-bit-word loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
#ifdef __mono_ilp32__
	/* under ILP32 the native word is 32 bits, so I8 loads are the 64-bit case */
	if (load_opcode == OP_LOADI8_MEMBASE)
	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_X86_PUSH_MEMBASE;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load (load_opcode) feeding the SECOND source operand of 'opcode'
 * into a combined x86/amd64 reg-op-memory opcode.  Returns a negative value
 * when no fused form exists.  (NOTE(review): the x86 #ifdef, switch headers
 * and 'case OP_*:' labels are elided in this listing; each return presumably
 * matches the compare/add/sub/and/or/xor opcode its name suggests.)
 */
op_to_op_src2_membase (int load_opcode, int opcode)
	/* x86: only 32-bit-word loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	/* ILP32: the native word is 32 bits, so OP_LOAD_MEMBASE joins the i4 cases */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
		return OP_AMD64_ICOMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
		/* 64-bit variants */
		return OP_AMD64_COMPARE_REG_MEMBASE;
		return OP_AMD64_ADD_REG_MEMBASE;
		return OP_AMD64_SUB_REG_MEMBASE;
		return OP_AMD64_AND_REG_MEMBASE;
		return OP_AMD64_OR_REG_MEMBASE;
		return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes whose
 * immediate form would have to be software-emulated on this target (long
 * shifts on 32-bit registers, emulated mul/div/rem).  (NOTE(review): the
 * 'case OP_*:' labels guarded by these #if's are elided in this listing.)
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	return mono_op_to_op_imm (opcode);
13521 * mono_handle_global_vregs:
13523 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13527 mono_handle_global_vregs (MonoCompile *cfg)
13529 gint32 *vreg_to_bb;
13530 MonoBasicBlock *bb;
13533 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13535 #ifdef MONO_ARCH_SIMD_INTRINSICS
13536 if (cfg->uses_simd_intrinsics)
13537 mono_simd_simplify_indirection (cfg);
13540 /* Find local vregs used in more than one bb */
13541 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13542 MonoInst *ins = bb->code;
13543 int block_num = bb->block_num;
13545 if (cfg->verbose_level > 2)
13546 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13549 for (; ins; ins = ins->next) {
13550 const char *spec = INS_INFO (ins->opcode);
13551 int regtype = 0, regindex;
13554 if (G_UNLIKELY (cfg->verbose_level > 2))
13555 mono_print_ins (ins);
13557 g_assert (ins->opcode >= MONO_CEE_LAST);
13559 for (regindex = 0; regindex < 4; regindex ++) {
13562 if (regindex == 0) {
13563 regtype = spec [MONO_INST_DEST];
13564 if (regtype == ' ')
13567 } else if (regindex == 1) {
13568 regtype = spec [MONO_INST_SRC1];
13569 if (regtype == ' ')
13572 } else if (regindex == 2) {
13573 regtype = spec [MONO_INST_SRC2];
13574 if (regtype == ' ')
13577 } else if (regindex == 3) {
13578 regtype = spec [MONO_INST_SRC3];
13579 if (regtype == ' ')
13584 #if SIZEOF_REGISTER == 4
13585 /* In the LLVM case, the long opcodes are not decomposed */
13586 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13588 * Since some instructions reference the original long vreg,
13589 * and some reference the two component vregs, it is quite hard
13590 * to determine when it needs to be global. So be conservative.
13592 if (!get_vreg_to_inst (cfg, vreg)) {
13593 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13595 if (cfg->verbose_level > 2)
13596 printf ("LONG VREG R%d made global.\n", vreg);
13600 * Make the component vregs volatile since the optimizations can
13601 * get confused otherwise.
13603 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13604 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13608 g_assert (vreg != -1);
13610 prev_bb = vreg_to_bb [vreg];
13611 if (prev_bb == 0) {
13612 /* 0 is a valid block num */
13613 vreg_to_bb [vreg] = block_num + 1;
13614 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13615 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13618 if (!get_vreg_to_inst (cfg, vreg)) {
13619 if (G_UNLIKELY (cfg->verbose_level > 2))
13620 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13624 if (vreg_is_ref (cfg, vreg))
13625 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13627 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13630 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13633 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13636 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13639 g_assert_not_reached ();
13643 /* Flag as having been used in more than one bb */
13644 vreg_to_bb [vreg] = -1;
13650 /* If a variable is used in only one bblock, convert it into a local vreg */
13651 for (i = 0; i < cfg->num_varinfo; i++) {
13652 MonoInst *var = cfg->varinfo [i];
13653 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13655 switch (var->type) {
13661 #if SIZEOF_REGISTER == 8
13664 #if !defined(TARGET_X86)
13665 /* Enabling this screws up the fp stack on x86 */
13668 if (mono_arch_is_soft_float ())
13671 /* Arguments are implicitly global */
13672 /* Putting R4 vars into registers doesn't work currently */
13673 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13674 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13676 * Make that the variable's liveness interval doesn't contain a call, since
13677 * that would cause the lvreg to be spilled, making the whole optimization
13680 /* This is too slow for JIT compilation */
13682 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13684 int def_index, call_index, ins_index;
13685 gboolean spilled = FALSE;
13690 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13691 const char *spec = INS_INFO (ins->opcode);
13693 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13694 def_index = ins_index;
13696 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13697 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13698 if (call_index > def_index) {
13704 if (MONO_IS_CALL (ins))
13705 call_index = ins_index;
13715 if (G_UNLIKELY (cfg->verbose_level > 2))
13716 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13717 var->flags |= MONO_INST_IS_DEAD;
13718 cfg->vreg_to_inst [var->dreg] = NULL;
13725 * Compress the varinfo and vars tables so the liveness computation is faster and
13726 * takes up less space.
13729 for (i = 0; i < cfg->num_varinfo; ++i) {
13730 MonoInst *var = cfg->varinfo [i];
13731 if (pos < i && cfg->locals_start == i)
13732 cfg->locals_start = pos;
13733 if (!(var->flags & MONO_INST_IS_DEAD)) {
13735 cfg->varinfo [pos] = cfg->varinfo [i];
13736 cfg->varinfo [pos]->inst_c0 = pos;
13737 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13738 cfg->vars [pos].idx = pos;
13739 #if SIZEOF_REGISTER == 4
13740 if (cfg->varinfo [pos]->type == STACK_I8) {
13741 /* Modify the two component vars too */
13744 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13745 var1->inst_c0 = pos;
13746 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13747 var1->inst_c0 = pos;
13754 cfg->num_varinfo = pos;
13755 if (cfg->locals_start > cfg->num_varinfo)
13756 cfg->locals_start = cfg->num_varinfo;
13760 * mono_spill_global_vars:
13762 * Generate spill code for variables which are not allocated to registers,
13763 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13764 * code is generated which could be optimized by the local optimization passes.
13767 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13769 MonoBasicBlock *bb;
13771 int orig_next_vreg;
13772 guint32 *vreg_to_lvreg;
13774 guint32 i, lvregs_len;
13775 gboolean dest_has_lvreg = FALSE;
13776 guint32 stacktypes [128];
13777 MonoInst **live_range_start, **live_range_end;
13778 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13779 int *gsharedvt_vreg_to_idx = NULL;
13781 *need_local_opts = FALSE;
13783 memset (spec2, 0, sizeof (spec2));
/* Map the register-type characters used in opcode spec strings ('i', 'l',
 * 'f', 'x') to stack types, used when allocating replacement lvregs below. */
13785 /* FIXME: Move this function to mini.c */
13786 stacktypes ['i'] = STACK_PTR;
13787 stacktypes ['l'] = STACK_I8;
13788 stacktypes ['f'] = STACK_R8;
13789 #ifdef MONO_ARCH_SIMD_INTRINSICS
13790 stacktypes ['x'] = STACK_VTYPE;
13793 #if SIZEOF_REGISTER == 4
13794 /* Create MonoInsts for longs */
13795 for (i = 0; i < cfg->num_varinfo; i++) {
13796 MonoInst *ins = cfg->varinfo [i];
13798 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13799 switch (ins->type) {
13804 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13807 g_assert (ins->opcode == OP_REGOFFSET);
/* The two component vregs of a long (dreg + 1 and dreg + 2) each get a
 * REGOFFSET pointing at the LS/MS word halves of the parent's stack slot. */
13809 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13811 tree->opcode = OP_REGOFFSET;
13812 tree->inst_basereg = ins->inst_basereg;
13813 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13815 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13817 tree->opcode = OP_REGOFFSET;
13818 tree->inst_basereg = ins->inst_basereg;
13819 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13829 if (cfg->compute_gc_maps) {
13830 /* register-allocated vars need liveness info even when they hold no references */
13831 for (i = 0; i < cfg->num_varinfo; i++) {
13832 MonoInst *ins = cfg->varinfo [i];
13834 if (ins->opcode == OP_REGVAR)
13835 ins->flags |= MONO_INST_GC_TRACK;
/*
 * Under gsharedvt, variable-sized locals have no fixed offset at compile time.
 * Record a per-vreg entry: idx + 1 for a gsharedvt local (index into the
 * runtime locals-offset table), -1 for a gsharedvt arg passed by ref. The
 * OP_LDADDR decomposition below uses these markers to compute addresses.
 */
13839 if (cfg->gsharedvt) {
13840 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13842 for (i = 0; i < cfg->num_varinfo; ++i) {
13843 MonoInst *ins = cfg->varinfo [i];
13846 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13847 if (i >= cfg->locals_start) {
13849 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13850 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13851 ins->opcode = OP_GSHAREDVT_LOCAL;
13852 ins->inst_imm = idx;
13855 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13856 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13862 /* FIXME: widening and truncation */
13865 * As an optimization, when a variable allocated to the stack is first loaded into
13866 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13867 * the variable again.
13869 orig_next_vreg = cfg->next_vreg;
13870 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13871 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13875 * These arrays contain the first and last instructions accessing a given
13877 * Since we emit bblocks in the same order we process them here, and we
13878 * don't split live ranges, these will precisely describe the live range of
13879 * the variable, i.e. the instruction range where a valid value can be found
13880 * in the variable's location.
13881 * The live range is computed using the liveness info computed by the liveness pass.
13882 * We can't use vmv->range, since that is an abstract live range, and we need
13883 * one which is instruction precise.
13884 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13886 /* FIXME: Only do this if debugging info is requested */
13887 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13888 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13889 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13890 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13892 /* Add spill loads/stores */
13893 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13896 if (cfg->verbose_level > 2)
13897 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock, so forget them here
 * (they are also cleared after calls, see the MONO_IS_CALL case below). */
13899 /* Clear vreg_to_lvreg array */
13900 for (i = 0; i < lvregs_len; i++)
13901 vreg_to_lvreg [lvregs [i]] = 0;
13905 MONO_BB_FOR_EACH_INS (bb, ins) {
13906 const char *spec = INS_INFO (ins->opcode);
13907 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13908 gboolean store, no_lvreg;
13909 int sregs [MONO_MAX_SRC_REGS];
13911 if (G_UNLIKELY (cfg->verbose_level > 2))
13912 mono_print_ins (ins);
13914 if (ins->opcode == OP_NOP)
13918 * We handle LDADDR here as well, since it can only be decomposed
13919 * when variable addresses are known.
13921 if (ins->opcode == OP_LDADDR) {
13922 MonoInst *var = ins->inst_p0;
13924 if (var->opcode == OP_VTARG_ADDR) {
13925 /* Happens on SPARC/S390 where vtypes are passed by reference */
13926 MonoInst *vtaddr = var->inst_left;
13927 if (vtaddr->opcode == OP_REGVAR) {
13928 ins->opcode = OP_MOVE;
13929 ins->sreg1 = vtaddr->dreg;
13931 else if (var->inst_left->opcode == OP_REGOFFSET) {
13932 ins->opcode = OP_LOAD_MEMBASE;
13933 ins->inst_basereg = vtaddr->inst_basereg;
13934 ins->inst_offset = vtaddr->inst_offset;
13937 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13938 /* gsharedvt arg passed by ref */
13939 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13941 ins->opcode = OP_LOAD_MEMBASE;
13942 ins->inst_basereg = var->inst_basereg;
13943 ins->inst_offset = var->inst_offset;
13944 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13945 MonoInst *load, *load2, *load3;
13946 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13947 int reg1, reg2, reg3;
13948 MonoInst *info_var = cfg->gsharedvt_info_var;
13949 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13953 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13956 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13958 g_assert (info_var);
13959 g_assert (locals_var);
13961 /* Mark the instruction used to compute the locals var as used */
13962 cfg->gsharedvt_locals_var_ins = NULL;
13964 /* Load the offset */
13965 if (info_var->opcode == OP_REGOFFSET) {
13966 reg1 = alloc_ireg (cfg);
13967 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13968 } else if (info_var->opcode == OP_REGVAR) {
13970 reg1 = info_var->dreg;
13972 g_assert_not_reached ();
13974 reg2 = alloc_ireg (cfg);
13975 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13976 /* Load the locals area address */
13977 reg3 = alloc_ireg (cfg);
13978 if (locals_var->opcode == OP_REGOFFSET) {
13979 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13980 } else if (locals_var->opcode == OP_REGVAR) {
13981 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13983 g_assert_not_reached ();
13985 /* Compute the address */
13986 ins->opcode = OP_PADD;
13990 mono_bblock_insert_before_ins (bb, ins, load3);
13991 mono_bblock_insert_before_ins (bb, load3, load2);
13993 mono_bblock_insert_before_ins (bb, load2, load);
13995 g_assert (var->opcode == OP_REGOFFSET);
13997 ins->opcode = OP_ADD_IMM;
13998 ins->sreg1 = var->inst_basereg;
13999 ins->inst_imm = var->inst_offset;
14002 *need_local_opts = TRUE;
14003 spec = INS_INFO (ins->opcode);
/* By this point only low-level (machine) opcodes may remain; a surviving
 * CIL-level opcode indicates a decomposition bug. */
14006 if (ins->opcode < MONO_CEE_LAST) {
14007 mono_print_ins (ins);
14008 g_assert_not_reached ();
14012 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 for store opcodes (and build a matching spec
 * in spec2) so the code below can treat the base reg as a source; the swap
 * is undone further down. */
14016 if (MONO_IS_STORE_MEMBASE (ins)) {
14017 tmp_reg = ins->dreg;
14018 ins->dreg = ins->sreg2;
14019 ins->sreg2 = tmp_reg;
14022 spec2 [MONO_INST_DEST] = ' ';
14023 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14024 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14025 spec2 [MONO_INST_SRC3] = ' ';
14027 } else if (MONO_IS_STORE_MEMINDEX (ins))
14028 g_assert_not_reached ();
14033 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14034 printf ("\t %.3s %d", spec, ins->dreg);
14035 num_sregs = mono_inst_get_src_registers (ins, sregs);
14036 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14037 printf (" %d", sregs [srcindex]);
/*
 * DREG: if the destination is a global var, either rename it to the var's
 * hreg (OP_REGVAR), fuse the store into the instruction, or emit an
 * explicit spill store after it.
 */
14044 regtype = spec [MONO_INST_DEST];
14045 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14048 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14049 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14050 MonoInst *store_ins;
14052 MonoInst *def_ins = ins;
14053 int dreg = ins->dreg; /* The original vreg */
14055 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14057 if (var->opcode == OP_REGVAR) {
14058 ins->dreg = var->dreg;
14059 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14061 * Instead of emitting a load+store, use a _membase opcode.
14063 g_assert (var->opcode == OP_REGOFFSET);
14064 if (ins->opcode == OP_MOVE) {
14068 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14069 ins->inst_basereg = var->inst_basereg;
14070 ins->inst_offset = var->inst_offset;
14073 spec = INS_INFO (ins->opcode);
14077 g_assert (var->opcode == OP_REGOFFSET);
14079 prev_dreg = ins->dreg;
14081 /* Invalidate any previous lvreg for this vreg */
14082 vreg_to_lvreg [ins->dreg] = 0;
14086 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14088 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a spill store from it is emitted below */
14091 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14093 #if SIZEOF_REGISTER != 8
14094 if (regtype == 'l') {
14095 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14096 mono_bblock_insert_after_ins (bb, ins, store_ins);
14097 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14098 mono_bblock_insert_after_ins (bb, ins, store_ins);
14099 def_ins = store_ins;
14104 g_assert (store_opcode != OP_STOREV_MEMBASE);
14106 /* Try to fuse the store into the instruction itself */
14107 /* FIXME: Add more instructions */
14108 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14109 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14110 ins->inst_imm = ins->inst_c0;
14111 ins->inst_destbasereg = var->inst_basereg;
14112 ins->inst_offset = var->inst_offset;
14113 spec = INS_INFO (ins->opcode);
14114 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14115 ins->opcode = store_opcode;
14116 ins->inst_destbasereg = var->inst_basereg;
14117 ins->inst_offset = var->inst_offset;
14121 tmp_reg = ins->dreg;
14122 ins->dreg = ins->sreg2;
14123 ins->sreg2 = tmp_reg;
14126 spec2 [MONO_INST_DEST] = ' ';
14127 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14128 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14129 spec2 [MONO_INST_SRC3] = ' ';
14131 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14132 // FIXME: The backends expect the base reg to be in inst_basereg
14133 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14135 ins->inst_basereg = var->inst_basereg;
14136 ins->inst_offset = var->inst_offset;
14137 spec = INS_INFO (ins->opcode);
14139 /* printf ("INS: "); mono_print_ins (ins); */
14140 /* Create a store instruction */
14141 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14143 /* Insert it after the instruction */
14144 mono_bblock_insert_after_ins (bb, ins, store_ins);
14146 def_ins = store_ins;
14149 * We can't assign ins->dreg to var->dreg here, since the
14150 * sregs could use it. So set a flag, and do it after
14153 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14154 dest_has_lvreg = TRUE;
/* Record the first definition point of this vreg for the live-range info */
14159 if (def_ins && !live_range_start [dreg]) {
14160 live_range_start [dreg] = def_ins;
14161 live_range_start_bb [dreg] = bb;
14164 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14167 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14168 tmp->inst_c1 = dreg;
14169 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/*
 * SREGS: for each source that is a global var, rename to its hreg, reuse a
 * cached lvreg, fuse the load into the instruction, or emit an explicit
 * load before it.
 */
14176 num_sregs = mono_inst_get_src_registers (ins, sregs);
14177 for (srcindex = 0; srcindex < 3; ++srcindex) {
14178 regtype = spec [MONO_INST_SRC1 + srcindex];
14179 sreg = sregs [srcindex];
14181 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14182 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14183 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14184 MonoInst *use_ins = ins;
14185 MonoInst *load_ins;
14186 guint32 load_opcode;
14188 if (var->opcode == OP_REGVAR) {
14189 sregs [srcindex] = var->dreg;
14190 //mono_inst_set_src_registers (ins, sregs);
14191 live_range_end [sreg] = use_ins;
14192 live_range_end_bb [sreg] = bb;
14194 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14197 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14198 /* var->dreg is a hreg */
14199 tmp->inst_c1 = sreg;
14200 mono_bblock_insert_after_ins (bb, ins, tmp);
14206 g_assert (var->opcode == OP_REGOFFSET);
14208 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14210 g_assert (load_opcode != OP_LOADV_MEMBASE);
14212 if (vreg_to_lvreg [sreg]) {
14213 g_assert (vreg_to_lvreg [sreg] != -1);
14215 /* The variable is already loaded to an lvreg */
14216 if (G_UNLIKELY (cfg->verbose_level > 2))
14217 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14218 sregs [srcindex] = vreg_to_lvreg [sreg];
14219 //mono_inst_set_src_registers (ins, sregs);
14223 /* Try to fuse the load into the instruction */
14224 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14225 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14226 sregs [0] = var->inst_basereg;
14227 //mono_inst_set_src_registers (ins, sregs);
14228 ins->inst_offset = var->inst_offset;
14229 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14230 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14231 sregs [1] = var->inst_basereg;
14232 //mono_inst_set_src_registers (ins, sregs);
14233 ins->inst_offset = var->inst_offset;
14235 if (MONO_IS_REAL_MOVE (ins)) {
14236 ins->opcode = OP_NOP;
14239 //printf ("%d ", srcindex); mono_print_ins (ins);
14241 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly-loaded lvreg for later uses of the same var */
14243 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14244 if (var->dreg == prev_dreg) {
14246 * sreg refers to the value loaded by the load
14247 * emitted below, but we need to use ins->dreg
14248 * since it refers to the store emitted earlier.
14252 g_assert (sreg != -1);
14253 vreg_to_lvreg [var->dreg] = sreg;
14254 g_assert (lvregs_len < 1024);
14255 lvregs [lvregs_len ++] = var->dreg;
14259 sregs [srcindex] = sreg;
14260 //mono_inst_set_src_registers (ins, sregs);
14262 #if SIZEOF_REGISTER != 8
14263 if (regtype == 'l') {
14264 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14265 mono_bblock_insert_before_ins (bb, ins, load_ins);
14266 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14267 mono_bblock_insert_before_ins (bb, ins, load_ins);
14268 use_ins = load_ins;
14273 #if SIZEOF_REGISTER == 4
14274 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14276 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14277 mono_bblock_insert_before_ins (bb, ins, load_ins);
14278 use_ins = load_ins;
14282 if (var->dreg < orig_next_vreg) {
14283 live_range_end [var->dreg] = use_ins;
14284 live_range_end_bb [var->dreg] = bb;
14287 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14290 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14291 tmp->inst_c1 = var->dreg;
14292 mono_bblock_insert_after_ins (bb, ins, tmp);
14296 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs have been
 * processed, the dreg lvreg can safely be cached. */
14298 if (dest_has_lvreg) {
14299 g_assert (ins->dreg != -1);
14300 vreg_to_lvreg [prev_dreg] = ins->dreg;
14301 g_assert (lvregs_len < 1024);
14302 lvregs [lvregs_len ++] = prev_dreg;
14303 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above */
14307 tmp_reg = ins->dreg;
14308 ins->dreg = ins->sreg2;
14309 ins->sreg2 = tmp_reg;
14312 if (MONO_IS_CALL (ins)) {
14313 /* Clear vreg_to_lvreg array */
14314 for (i = 0; i < lvregs_len; i++)
14315 vreg_to_lvreg [lvregs [i]] = 0;
14317 } else if (ins->opcode == OP_NOP) {
14319 MONO_INST_NULLIFY_SREGS (ins);
14322 if (cfg->verbose_level > 2)
14323 mono_print_ins_index (1, ins);
14326 /* Extend the live range based on the liveness info */
14327 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14328 for (i = 0; i < cfg->num_varinfo; i ++) {
14329 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14331 if (vreg_is_volatile (cfg, vi->vreg))
14332 /* The liveness info is incomplete */
14335 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14336 /* Live from at least the first ins of this bb */
14337 live_range_start [vi->vreg] = bb->code;
14338 live_range_start_bb [vi->vreg] = bb;
14341 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14342 /* Live at least until the last ins of this bb */
14343 live_range_end [vi->vreg] = bb->last_ins;
14344 live_range_end_bb [vi->vreg] = bb;
14350 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14352 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14353 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14355 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14356 for (i = 0; i < cfg->num_varinfo; ++i) {
14357 int vreg = MONO_VARINFO (cfg, i)->vreg;
14360 if (live_range_start [vreg]) {
14361 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14363 ins->inst_c1 = vreg;
14364 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14366 if (live_range_end [vreg]) {
14367 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14369 ins->inst_c1 = vreg;
14370 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14371 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14373 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14379 if (cfg->gsharedvt_locals_var_ins) {
14380 /* Nullify if unused */
14381 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14382 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14385 g_free (live_range_start);
14386 g_free (live_range_end);
14387 g_free (live_range_start_bb);
14388 g_free (live_range_end_bb);
14393 * - use 'iadd' instead of 'int_add'
14394 * - handling ovf opcodes: decompose in method_to_ir.
14395 * - unify iregs/fregs
14396 * -> partly done, the missing parts are:
14397 * - a more complete unification would involve unifying the hregs as well, so
14398 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14399 * would no longer map to the machine hregs, so the code generators would need to
14400 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14401 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14402 * fp/non-fp branches speeds it up by about 15%.
14403 * - use sext/zext opcodes instead of shifts
14405 * - get rid of TEMPLOADs if possible and use vregs instead
14406 * - clean up usage of OP_P/OP_ opcodes
14407 * - cleanup usage of DUMMY_USE
14408 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14410 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14411 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14412 * - make sure handle_stack_args () is called before the branch is emitted
14413 * - when the new IR is done, get rid of all unused stuff
14414 * - COMPARE/BEQ as separate instructions or unify them ?
14415 * - keeping them separate allows specialized compare instructions like
14416 * compare_imm, compare_membase
14417 * - most back ends unify fp compare+branch, fp compare+ceq
14418 * - integrate mono_save_args into inline_method
14419 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14420 * - handle long shift opts on 32 bit platforms somehow: they require
14421 * 3 sregs (2 for arg1 and 1 for arg2)
14422 * - make byref a 'normal' type.
14423 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14424 * variable if needed.
14425 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14426 * like inline_method.
14427 * - remove inlining restrictions
14428 * - fix LNEG and enable cfold of INEG
14429 * - generalize x86 optimizations like ldelema as a peephole optimization
14430 * - add store_mem_imm for amd64
14431 * - optimize the loading of the interruption flag in the managed->native wrappers
14432 * - avoid special handling of OP_NOP in passes
14433 * - move code inserting instructions into one function/macro.
14434 * - try a coalescing phase after liveness analysis
14435 * - add float -> vreg conversion + local optimizations on !x86
14436 * - figure out how to handle decomposed branches during optimizations, ie.
14437 * compare+branch, op_jump_table+op_br etc.
14438 * - promote RuntimeXHandles to vregs
14439 * - vtype cleanups:
14440 * - add a NEW_VARLOADA_VREG macro
14441 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14442 * accessing vtype fields.
14443 * - get rid of I8CONST on 64 bit platforms
14444 * - dealing with the increase in code size due to branches created during opcode
14446 * - use extended basic blocks
14447 * - all parts of the JIT
14448 * - handle_global_vregs () && local regalloc
14449 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14450 * - sources of increase in code size:
14453 * - isinst and castclass
14454 * - lvregs not allocated to global registers even if used multiple times
14455 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14457 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14458 * - add all micro optimizations from the old JIT
14459 * - put tree optimizations into the deadce pass
14460 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14461 * specific function.
14462 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14463 * fcompare + branchCC.
14464 * - create a helper function for allocating a stack slot, taking into account
14465 * MONO_CFG_HAS_SPILLUP.
14467 * - merge the ia64 switch changes.
14468 * - optimize mono_regstate2_alloc_int/float.
14469 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14470 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14471 * parts of the tree could be separated by other instructions, killing the tree
14472 * arguments, or stores killing loads etc. Also, should we fold loads into other
14473 * instructions if the result of the load is used multiple times ?
14474 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14475 * - LAST MERGE: 108395.
14476 * - when returning vtypes in registers, generate IR and append it to the end of the
14477 * last bb instead of doing it in the epilog.
14478 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14486 - When to decompose opcodes:
14487 - earlier: this makes some optimizations hard to implement, since the low level IR
14488 no longer contains the necessary information. But it is easier to do.
14489 - later: harder to implement, enables more optimizations.
14490 - Branches inside bblocks:
14491 - created when decomposing complex opcodes.
14492 - branches to another bblock: harmless, but not tracked by the branch
14493 optimizations, so need to branch to a label at the start of the bblock.
14494 - branches to inside the same bblock: very problematic, trips up the local
14495 reg allocator. Can be fixed by splitting the current bblock, but that is a
14496 complex operation, since some local vregs can become global vregs etc.
14497 - Local/global vregs:
14498 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14499 local register allocator.
14500 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14501 structure, created by mono_create_var (). Assigned to hregs or the stack by
14502 the global register allocator.
14503 - When to do optimizations like alu->alu_imm:
14504 - earlier -> saves work later on since the IR will be smaller/simpler
14505 - later -> can work on more instructions
14506 - Handling of valuetypes:
14507 - When a vtype is pushed on the stack, a new temporary is created, an
14508 instruction computing its address (LDADDR) is emitted and pushed on
14509 the stack. Need to optimize cases when the vtype is used immediately as in
14510 argument passing, stloc etc.
14511 - Instead of the to_end stuff in the old JIT, simply call the function handling
14512 the values on the stack before emitting the last instruction of the bb.
14515 #endif /* DISABLE_JIT */