2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Tuning constants: INLINE_LENGTH_LIMIT caps the IL size of inline
 * candidates; BRANCH_COST is presumably a branch weight in the inline
 * cost model -- confirm at use sites. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
75 /* These have 'cfg' as an implicit argument */
/* Record an inline failure and abort compilation of the current method,
 * but only while actually inlining (cfg->method != current_method) and
 * not compiling a wrapper. */
76 #define INLINE_FAILURE(msg) do { \
77 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
78 inline_failure (cfg, msg); \
79 goto exception_exit; \
/* Bail out of the IR builder if a compile-time exception is pending on cfg. */
82 #define CHECK_CFG_EXCEPTION do {\
83 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
84 goto exception_exit; \
/* Record a method-access violation (see method_access_failure ()) and abort. */
86 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
87 method_access_failure ((cfg), (method), (cmethod)); \
88 goto exception_exit; \
/* Record a field-access violation (see field_access_failure ()) and abort. */
90 #define FIELD_ACCESS_FAILURE(method, field) do { \
91 field_access_failure ((cfg), (method), (field)); \
92 goto exception_exit; \
/* Generic sharing is not possible for OPCODE: record the failure with the
 * call site's file/line and abort this (shared) compilation. */
94 #define GENERIC_SHARING_FAILURE(opcode) do { \
96 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
97 goto exception_exit; \
/* Like GENERIC_SHARING_FAILURE, but only triggers when compiling with
 * gsharedvt (valuetype sharing). */
100 #define GSHAREDVT_FAILURE(opcode) do { \
101 if (cfg->gsharedvt) { \
102 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
103 goto exception_exit; \
/* Abort the compilation with an OutOfMemory condition. */
106 #define OUT_OF_MEMORY_FAILURE do { \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
108 goto exception_exit; \
/* Mark the method as not AOT-able; logged at verbose level >= 2. */
110 #define DISABLE_AOT(cfg) do { \
111 if ((cfg)->verbose_level >= 2) \
112 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
113 (cfg)->disable_aot = TRUE; \
/* Abort with a TypeLoadException; break_on_unverified () lets a debugger
 * trap the failure point first. */
115 #define LOAD_ERROR do { \
116 break_on_unverified (); \
117 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
/* Like LOAD_ERROR, but also records the offending class in cfg->exception_ptr. */
118 goto exception_exit; \
121 #define TYPE_LOAD_ERROR(klass) do { \
122 cfg->exception_ptr = klass; \
126 /* Determine whether 'ins' represents a load of the 'this' argument */
127 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
129 static int ldind_to_load_membase (int opcode);
130 static int stind_to_store_membase (int opcode);
132 int mono_op_to_op_imm (int opcode);
133 int mono_op_to_op_imm_noemul (int opcode);
135 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
137 /* helper methods signatures */
138 static MonoMethodSignature *helper_sig_class_init_trampoline;
139 static MonoMethodSignature *helper_sig_domain_get;
140 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
141 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
142 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
143 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
144 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
147 * Instruction metadata
155 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
156 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
162 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
167 /* keep in sync with the enum in mini.h */
170 #include "mini-ops.h"
175 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
176 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
178 * This should contain the index of the last sreg + 1. This is not the same
179 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
181 const gint8 ins_sreg_counts[] = {
182 #include "mini-ops.h"
/* Initialize a variable's liveness info; 0xffff marks "no first use recorded yet". */
187 #define MONO_INIT_VARINFO(vi,id) do { \
188 (vi)->range.first_use.pos.bid = 0xffff; \
/* Exported wrappers around the file-local vreg allocators, so other
 * compilation units can allocate virtual registers.
 * Allocate a fresh integer vreg. */
194 mono_alloc_ireg (MonoCompile *cfg)
196 return alloc_ireg (cfg);
/* Allocate a fresh long (64-bit) vreg. */
200 mono_alloc_lreg (MonoCompile *cfg)
202 return alloc_lreg (cfg);
/* Allocate a fresh floating-point vreg. */
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
/* Allocate a destination vreg matching the given evaluation-stack type. */
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the opcode used to copy a value of that type between
 * vregs, dispatching on the underlying metadata type.  Enums and
 * generic instances are unwrapped before dispatch.
 */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
267 type = mini_replace_type (type);
269 switch (type->type) {
272 case MONO_TYPE_BOOLEAN:
284 case MONO_TYPE_FNPTR:
286 case MONO_TYPE_CLASS:
287 case MONO_TYPE_STRING:
288 case MONO_TYPE_OBJECT:
289 case MONO_TYPE_SZARRAY:
290 case MONO_TYPE_ARRAY:
294 #if SIZEOF_REGISTER == 8
303 case MONO_TYPE_VALUETYPE:
/* An enum moves like its underlying integral type. */
304 if (type->data.klass->enumtype) {
305 type = mono_class_enum_basetype (type->data.klass);
308 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
311 case MONO_TYPE_TYPEDBYREF:
/* Inflated generic types dispatch on the generic type definition. */
313 case MONO_TYPE_GENERICINST:
314 type = &type->data.generic_class->container_class->byval_arg;
318 g_assert (cfg->generic_sharing_context);
319 if (mini_type_var_is_vt (cfg, type))
324 g_error ("unknown type 0x%02x in mono_type_to_regmove", type->type);
/* Debugging helper: print BB's predecessor/successor lists (block number
 * and depth-first number) and all of its instructions to stdout,
 * prefixed by MSG. */
330 mono_print_bb (MonoBasicBlock *bb, const char *msg)
335 printf ("\n%s %d: [IN: ", msg, bb->block_num);
336 for (i = 0; i < bb->in_count; ++i)
337 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
339 for (i = 0; i < bb->out_count; ++i)
340 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
342 for (tree = bb->code; tree; tree = tree->next)
343 mono_print_ins_index (-1, tree);
/* Build the cached MonoMethodSignature's for the JIT helper trampolines
 * (see the helper_sig_* statics above).  The strings are icall signature
 * descriptions: "<ret> <args>...". */
347 mono_create_helper_signatures (void)
349 helper_sig_domain_get = mono_create_icall_signature ("ptr");
350 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
352 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
353 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
354 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
355 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
/* Hook for the --break-on-unverified debug option: gives a debugger a
 * place to trap when unverifiable IL is encountered. */
358 static MONO_NEVER_INLINE void
359 break_on_unverified (void)
361 if (mini_get_debug_options ()->break_on_unverified)
/* Record a MethodAccessException-style compile failure on CFG, naming
 * both the caller and the inaccessible callee in the message.
 * MONO_NEVER_INLINE keeps this cold path out of the hot IR builder. */
365 static MONO_NEVER_INLINE void
366 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
368 char *method_fname = mono_method_full_name (method, TRUE);
369 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
370 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
371 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
372 g_free (method_fname);
373 g_free (cil_method_fname);
/* Record a FieldAccessException-style compile failure on CFG. */
376 static MONO_NEVER_INLINE void
377 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *field_fname = mono_field_full_name (field);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
382 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
383 g_free (method_fname);
384 g_free (field_fname);
/* Mark the compilation as a failed inline attempt (see the
 * INLINE_FAILURE macro, which calls this). */
387 static MONO_NEVER_INLINE void
388 inline_failure (MonoCompile *cfg, const char *msg)
390 if (cfg->verbose_level >= 2)
391 printf ("inline failed: %s\n", msg);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 *
 *   Record that generic sharing is not possible for the current method
 * because of OPCODE.  FILE/LINE identify the call site requesting the
 * failure (GENERIC_SHARING_FAILURE passes __FILE__/__LINE__).
 */
395 static MONO_NEVER_INLINE void
396 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
398 if (cfg->verbose_level > 2)
399 printf ("sharing failed for method %s.%s.%s/%d opcode %s %s:%d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record that gsharedvt compilation failed for OPCODE; FILE/LINE locate
 * the failing call site (passed by the GSHAREDVT_FAILURE macro). */
403 static MONO_NEVER_INLINE void
404 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
407 if (cfg->verbose_level >= 2)
408 printf ("%s\n", cfg->exception_message);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
413 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
414 * foo<T> (int i) { ldarg.0; box T; }
416 #define UNVERIFIED do { \
417 if (cfg->gsharedvt) { \
418 if (cfg->verbose_level > 2) \
419 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
421 goto exception_exit; \
423 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP, using the cil_offset_to_bb cache.  An IP outside the method
 * body is unverifiable. */
427 #define GET_BBLOCK(cfg,tblock,ip) do { \
428 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
430 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
431 NEW_BBLOCK (cfg, (tblock)); \
432 (tblock)->cil_code = (ip); \
433 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA computing sr1 + (sr2 << shift) + imm into a
 * fresh managed-pointer vreg. */
437 #if defined(TARGET_X86) || defined(TARGET_AMD64)
438 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
439 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
440 (dest)->dreg = alloc_ireg_mp ((cfg)); \
441 (dest)->sreg1 = (sr1); \
442 (dest)->sreg2 = (sr2); \
443 (dest)->inst_imm = (imm); \
444 (dest)->backend.shift_amount = (shift); \
445 MONO_ADD_INS ((cfg)->cbb, (dest)); \
449 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand when the other operand is
 * pointer-sized; expands to nothing on 32-bit targets. */
450 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
451 /* FIXME: Need to add many more cases */ \
452 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
454 int dr = alloc_preg (cfg); \
455 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
456 (ins)->sreg2 = widen->dreg; \
460 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit a binary IL operation: the operands are at sp [0]/sp [1] (the
 * caller has already adjusted sp); type_from_op () picks the
 * type-specific opcode and flags invalid operand pairs; the result is
 * pushed back onto the eval stack. */
463 #define ADD_BINOP(op) do { \
464 MONO_INST_NEW (cfg, ins, (op)); \
466 ins->sreg1 = sp [0]->dreg; \
467 ins->sreg2 = sp [1]->dreg; \
468 type_from_op (ins, sp [0], sp [1]); \
470 /* Have to insert a widening op */ \
471 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
472 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
473 MONO_ADD_INS ((cfg)->cbb, (ins)); \
474 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Emit a unary IL operation on the value at sp [0] and push the result. */
477 #define ADD_UNOP(op) do { \
478 MONO_INST_NEW (cfg, ins, (op)); \
480 ins->sreg1 = sp [0]->dreg; \
481 type_from_op (ins, sp [0], NULL); \
483 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
484 MONO_ADD_INS ((cfg)->cbb, (ins)); \
485 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional-branch pair for a two-way IL branch.
 * Creates/links the true target (from the branch offset in 'target') and
 * the false target (NEXT_BLOCK if given, otherwise the block at the
 * following IL offset), then spills any values still on the evaluation
 * stack via handle_stack_args () before ending the block. */
488 #define ADD_BINCOND(next_block) do { \
491 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
492 cmp->sreg1 = sp [0]->dreg; \
493 cmp->sreg2 = sp [1]->dreg; \
494 type_from_op (cmp, sp [0], sp [1]); \
496 type_from_op (ins, sp [0], sp [1]); \
497 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
498 GET_BBLOCK (cfg, tblock, target); \
499 link_bblock (cfg, bblock, tblock); \
500 ins->inst_true_bb = tblock; \
501 if ((next_block)) { \
502 link_bblock (cfg, bblock, (next_block)); \
503 ins->inst_false_bb = (next_block); \
504 start_new_bblock = 1; \
506 GET_BBLOCK (cfg, tblock, ip); \
507 link_bblock (cfg, bblock, tblock); \
508 ins->inst_false_bb = tblock; \
509 start_new_bblock = 2; \
511 if (sp != stack_start) { \
512 handle_stack_args (cfg, stack_start, sp - stack_start); \
513 CHECK_UNVERIFIABLE (cfg); \
515 MONO_ADD_INS (bblock, cmp); \
516 MONO_ADD_INS (bblock, ins); \
520 * link_bblock: Links two basic blocks
522 * links two basic blocks in the control flow graph, the 'from'
523 * argument is the starting block and the 'to' argument is the block
524 * that control flow reaches after leaving 'from'.
527 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
529 MonoBasicBlock **newa;
533 if (from->cil_code) {
535 printf ("edge from IL_%04x to IL_%04x\n", (int)(from->cil_code - cfg->cil_code), (int)(to->cil_code - cfg->cil_code));
537 printf ("edge from IL_%04x to exit\n", (int)(from->cil_code - cfg->cil_code));
540 printf ("edge from entry to IL_%04x\n", (int)(to->cil_code - cfg->cil_code));
542 printf ("edge from entry to exit\n");
/* Nothing to do if the edge already exists. */
547 for (i = 0; i < from->out_count; ++i) {
548 if (to == from->out_bb [i]) {
/* Grow the successor array by one slot; arrays live in the compile
 * mempool, so the old copy is simply abandoned. */
554 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
555 for (i = 0; i < from->out_count; ++i) {
556 newa [i] = from->out_bb [i];
/* Same for the reverse (predecessor) edge. */
564 for (i = 0; i < to->in_count; ++i) {
565 if (from == to->in_bb [i]) {
571 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
572 for (i = 0; i < to->in_count; ++i) {
573 newa [i] = to->in_bb [i];
/* Exported wrapper around link_bblock (). */
582 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
584 link_bblock (cfg, from, to);
588 * mono_find_block_region:
590 * We mark each basic block with a region ID. We use that to avoid BB
591 * optimizations when blocks are in different regions.
594 * A region token that encodes where this region is, and information
595 * about the clause owner for this block.
597 * The region encodes the try/catch/filter clause that owns this block
598 * as well as the type. -1 is a special value that represents a block
599 * that is in none of try/catch/filter.
602 mono_find_block_region (MonoCompile *cfg, int offset)
604 MonoMethodHeader *header = cfg->header;
605 MonoExceptionClause *clause;
608 for (i = 0; i < header->num_clauses; ++i) {
609 clause = &header->clauses [i];
/* A filter region runs from filter_offset up to the start of its handler. */
610 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
611 (offset < (clause->handler_offset)))
612 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
614 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
615 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
616 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
617 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
618 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Any other handler kind counts as a catch region. */
620 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Not inside a handler: check the protected (try) range itself. */
623 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
624 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind TYPE whose protected range
 * contains IP but not TARGET, i.e. the handlers (e.g. finally blocks)
 * that must run when branching from IP to TARGET.  Returns a GList;
 * presumably owned by the caller -- confirm at call sites. */
631 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
638 for (i = 0; i < header->num_clauses; ++i) {
639 clause = &header->clauses [i];
640 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
641 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
642 if (clause->flags == type)
643 res = g_list_append (res, clause);
/* Get or lazily create the per-region sp save variable (presumably the
 * saved stack pointer for handler regions -- confirm at use sites);
 * cached in cfg->spvars keyed by region id. */
650 mono_create_spvar_for_region (MonoCompile *cfg, int region)
654 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
658 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
659 /* prevent it from being register allocated */
660 var->flags |= MONO_INST_VOLATILE;
662 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception-object variable for IL OFFSET, or NULL if none was created. */
666 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
668 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for IL OFFSET;
 * cached in cfg->exvars keyed by offset. */
672 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
676 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
680 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
681 /* prevent it from being register allocated */
682 var->flags |= MONO_INST_VOLATILE;
684 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
690 * Returns the type used in the eval stack when @type is loaded.
691 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
694 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
698 type = mini_replace_type (type);
699 inst->klass = klass = mono_class_from_mono_type (type);
701 inst->type = STACK_MP;
706 switch (type->type) {
708 inst->type = STACK_INV;
712 case MONO_TYPE_BOOLEAN:
718 inst->type = STACK_I4;
723 case MONO_TYPE_FNPTR:
724 inst->type = STACK_PTR;
726 case MONO_TYPE_CLASS:
727 case MONO_TYPE_STRING:
728 case MONO_TYPE_OBJECT:
729 case MONO_TYPE_SZARRAY:
730 case MONO_TYPE_ARRAY:
731 inst->type = STACK_OBJ;
735 inst->type = STACK_I8;
739 inst->type = STACK_R8;
741 case MONO_TYPE_VALUETYPE:
742 if (type->data.klass->enumtype) {
743 type = mono_class_enum_basetype (type->data.klass);
747 inst->type = STACK_VTYPE;
750 case MONO_TYPE_TYPEDBYREF:
751 inst->klass = mono_defaults.typed_reference_class;
752 inst->type = STACK_VTYPE;
754 case MONO_TYPE_GENERICINST:
755 type = &type->data.generic_class->container_class->byval_arg;
759 g_assert (cfg->generic_sharing_context);
760 if (mini_is_gsharedvt_type (cfg, type)) {
761 g_assert (cfg->gsharedvt);
762 inst->type = STACK_VTYPE;
764 inst->type = STACK_OBJ;
768 g_error ("unknown type 0x%02x in eval stack type", type->type);
773 * The following tables are used to quickly validate the IL code in type_from_op ().
776 bin_num_table [STACK_MAX] [STACK_MAX] = {
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
779 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
789 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
792 /* reduce the size of this table */
794 bin_int_table [STACK_MAX] [STACK_MAX] = {
795 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
796 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
797 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
798 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
799 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
800 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
801 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
802 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
806 bin_comp_table [STACK_MAX] [STACK_MAX] = {
807 /* Inv i L p F & O vt */
809 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
810 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
811 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
812 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
813 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
814 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
815 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
818 /* reduce the size of this table */
820 shift_table [STACK_MAX] [STACK_MAX] = {
821 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
832 * Tables to map from the non-specific opcode to the matching
833 * type-specific opcode.
835 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
837 binops_op_map [STACK_MAX] = {
838 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
841 /* handles from CEE_NEG to CEE_CONV_U8 */
843 unops_op_map [STACK_MAX] = {
844 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
847 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
849 ovfops_op_map [STACK_MAX] = {
850 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
853 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
855 ovf2ops_op_map [STACK_MAX] = {
856 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
859 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
861 ovf3ops_op_map [STACK_MAX] = {
862 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
865 /* handles from CEE_BEQ to CEE_BLT_UN */
867 beqops_op_map [STACK_MAX] = {
868 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
871 /* handles from CEE_CEQ to CEE_CLT_UN */
873 ceqops_op_map [STACK_MAX] = {
874 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
878 * Sets ins->type (the type on the eval stack) according to the
879 * type of the opcode and the arguments to it.
880 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
882 * FIXME: this function sets ins->type unconditionally in some cases, but
883 * it should set it to invalid for some types (a conv.x on an object)
886 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
888 switch (ins->opcode) {
895 /* FIXME: check unverifiable args for STACK_MP */
896 ins->type = bin_num_table [src1->type] [src2->type];
897 ins->opcode += binops_op_map [ins->type];
904 ins->type = bin_int_table [src1->type] [src2->type];
905 ins->opcode += binops_op_map [ins->type];
910 ins->type = shift_table [src1->type] [src2->type];
911 ins->opcode += binops_op_map [ins->type];
916 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
917 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
918 ins->opcode = OP_LCOMPARE;
919 else if (src1->type == STACK_R8)
920 ins->opcode = OP_FCOMPARE;
922 ins->opcode = OP_ICOMPARE;
924 case OP_ICOMPARE_IMM:
925 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
926 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
927 ins->opcode = OP_LCOMPARE_IMM;
939 ins->opcode += beqops_op_map [src1->type];
942 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
943 ins->opcode += ceqops_op_map [src1->type];
949 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
950 ins->opcode += ceqops_op_map [src1->type];
954 ins->type = neg_table [src1->type];
955 ins->opcode += unops_op_map [ins->type];
958 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
959 ins->type = src1->type;
961 ins->type = STACK_INV;
962 ins->opcode += unops_op_map [ins->type];
968 ins->type = STACK_I4;
969 ins->opcode += unops_op_map [src1->type];
972 ins->type = STACK_R8;
973 switch (src1->type) {
976 ins->opcode = OP_ICONV_TO_R_UN;
979 ins->opcode = OP_LCONV_TO_R_UN;
983 case CEE_CONV_OVF_I1:
984 case CEE_CONV_OVF_U1:
985 case CEE_CONV_OVF_I2:
986 case CEE_CONV_OVF_U2:
987 case CEE_CONV_OVF_I4:
988 case CEE_CONV_OVF_U4:
989 ins->type = STACK_I4;
990 ins->opcode += ovf3ops_op_map [src1->type];
992 case CEE_CONV_OVF_I_UN:
993 case CEE_CONV_OVF_U_UN:
994 ins->type = STACK_PTR;
995 ins->opcode += ovf2ops_op_map [src1->type];
997 case CEE_CONV_OVF_I1_UN:
998 case CEE_CONV_OVF_I2_UN:
999 case CEE_CONV_OVF_I4_UN:
1000 case CEE_CONV_OVF_U1_UN:
1001 case CEE_CONV_OVF_U2_UN:
1002 case CEE_CONV_OVF_U4_UN:
1003 ins->type = STACK_I4;
1004 ins->opcode += ovf2ops_op_map [src1->type];
1007 ins->type = STACK_PTR;
1008 switch (src1->type) {
1010 ins->opcode = OP_ICONV_TO_U;
1014 #if SIZEOF_VOID_P == 8
1015 ins->opcode = OP_LCONV_TO_U;
1017 ins->opcode = OP_MOVE;
1021 ins->opcode = OP_LCONV_TO_U;
1024 ins->opcode = OP_FCONV_TO_U;
1030 ins->type = STACK_I8;
1031 ins->opcode += unops_op_map [src1->type];
1033 case CEE_CONV_OVF_I8:
1034 case CEE_CONV_OVF_U8:
1035 ins->type = STACK_I8;
1036 ins->opcode += ovf3ops_op_map [src1->type];
1038 case CEE_CONV_OVF_U8_UN:
1039 case CEE_CONV_OVF_I8_UN:
1040 ins->type = STACK_I8;
1041 ins->opcode += ovf2ops_op_map [src1->type];
1045 ins->type = STACK_R8;
1046 ins->opcode += unops_op_map [src1->type];
1049 ins->type = STACK_R8;
1053 ins->type = STACK_I4;
1054 ins->opcode += ovfops_op_map [src1->type];
1057 case CEE_CONV_OVF_I:
1058 case CEE_CONV_OVF_U:
1059 ins->type = STACK_PTR;
1060 ins->opcode += ovfops_op_map [src1->type];
1063 case CEE_ADD_OVF_UN:
1065 case CEE_MUL_OVF_UN:
1067 case CEE_SUB_OVF_UN:
1068 ins->type = bin_num_table [src1->type] [src2->type];
1069 ins->opcode += ovfops_op_map [src1->type];
1070 if (ins->type == STACK_R8)
1071 ins->type = STACK_INV;
1073 case OP_LOAD_MEMBASE:
1074 ins->type = STACK_PTR;
1076 case OP_LOADI1_MEMBASE:
1077 case OP_LOADU1_MEMBASE:
1078 case OP_LOADI2_MEMBASE:
1079 case OP_LOADU2_MEMBASE:
1080 case OP_LOADI4_MEMBASE:
1081 case OP_LOADU4_MEMBASE:
1082 ins->type = STACK_PTR;
1084 case OP_LOADI8_MEMBASE:
1085 ins->type = STACK_I8;
1087 case OP_LOADR4_MEMBASE:
1088 case OP_LOADR8_MEMBASE:
1089 ins->type = STACK_R8;
1092 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1096 if (ins->type == STACK_MP)
1097 ins->klass = mono_defaults.object_class;
1102 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1108 param_table [STACK_MAX] [STACK_MAX] = {
1113 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1117 switch (args->type) {
1127 for (i = 0; i < sig->param_count; ++i) {
1128 switch (args [i].type) {
1132 if (!sig->params [i]->byref)
1136 if (sig->params [i]->byref)
1138 switch (sig->params [i]->type) {
1139 case MONO_TYPE_CLASS:
1140 case MONO_TYPE_STRING:
1141 case MONO_TYPE_OBJECT:
1142 case MONO_TYPE_SZARRAY:
1143 case MONO_TYPE_ARRAY:
1150 if (sig->params [i]->byref)
1152 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1161 /*if (!param_table [args [i].type] [sig->params [i]->type])
1169 * When we need a pointer to the current domain many times in a method, we
1170 * call mono_domain_get() once and we store the result in a local variable.
1171 * This function returns the variable that represents the MonoDomain*.
1173 inline static MonoInst *
1174 mono_get_domainvar (MonoCompile *cfg)
1176 if (!cfg->domainvar)
1177 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1178 return cfg->domainvar;
1182 * The got_var contains the address of the Global Offset Table when AOT
1186 mono_get_got_var (MonoCompile *cfg)
1188 #ifdef MONO_ARCH_NEED_GOT_VAR
1189 if (!cfg->compile_aot)
1191 if (!cfg->got_var) {
1192 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1194 return cfg->got_var;
/* Return (creating on first use) the variable holding the rgctx/vtable
 * for generic-shared methods; only valid under generic sharing
 * (asserted).  MONO_INST_VOLATILE forces stack allocation. */
1201 mono_get_vtable_var (MonoCompile *cfg)
1203 g_assert (cfg->generic_sharing_context);
1205 if (!cfg->rgctx_var) {
1206 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1207 /* force the var to be stack allocated */
1208 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1211 return cfg->rgctx_var;
/* Map an evaluation-stack type back to a representative MonoType. */
1215 type_from_stack_type (MonoInst *ins) {
1216 switch (ins->type) {
1217 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1218 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1219 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1220 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers / value types use the klass recorded on the instruction. */
1222 return &ins->klass->this_arg;
1223 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1224 case STACK_VTYPE: return &ins->klass->byval_arg;
1226 g_error ("stack type %d to monotype not handled\n", ins->type);
1231 static G_GNUC_UNUSED int
1232 type_to_stack_type (MonoType *t)
1234 t = mono_type_get_underlying_type (t);
1238 case MONO_TYPE_BOOLEAN:
1241 case MONO_TYPE_CHAR:
1248 case MONO_TYPE_FNPTR:
1250 case MONO_TYPE_CLASS:
1251 case MONO_TYPE_STRING:
1252 case MONO_TYPE_OBJECT:
1253 case MONO_TYPE_SZARRAY:
1254 case MONO_TYPE_ARRAY:
1262 case MONO_TYPE_VALUETYPE:
1263 case MONO_TYPE_TYPEDBYREF:
1265 case MONO_TYPE_GENERICINST:
1266 if (mono_type_generic_inst_is_valuetype (t))
1272 g_assert_not_reached ();
/* Return the element class accessed by an ldelem/stelem IL opcode. */
1279 array_access_to_klass (int opcode)
1283 return mono_defaults.byte_class;
1285 return mono_defaults.uint16_class;
1288 return mono_defaults.int_class;
1291 return mono_defaults.sbyte_class;
1294 return mono_defaults.int16_class;
1297 return mono_defaults.int32_class;
1299 return mono_defaults.uint32_class;
1302 return mono_defaults.int64_class;
1305 return mono_defaults.single_class;
1308 return mono_defaults.double_class;
1309 case CEE_LDELEM_REF:
1310 case CEE_STELEM_REF:
1311 return mono_defaults.object_class;
1313 g_assert_not_reached ();
1319 * We try to share variables when possible
/* Return a local variable used to spill stack slot SLOT across a
 * basic-block boundary.  Primitive-typed slots are cached in
 * cfg->intvars so different blocks reuse the same local; other types
 * always get a fresh variable. */
1322 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1327 /* inlining can result in deeper stacks */
1328 if (slot >= cfg->header->max_stack)
1329 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* One cache entry per (stack type, stack depth) pair. */
1331 pos = ins->type - 1 + slot * STACK_MAX;
1333 switch (ins->type) {
1340 if ((vnum = cfg->intvars [pos]))
1341 return cfg->varinfo [vnum];
1342 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1343 cfg->intvars [pos] = res->inst_c0;
1346 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT compilation, remember the (image, token) pair that produced
 * 'key' in cfg->token_info_hash so the AOT compiler can later re-resolve
 * the reference from metadata alone.
 */
1352 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1355 * Don't use this if a generic_context is set, since that means AOT can't
1356 * look up the method using just the image+token.
1357 * table == 0 means this is a reference made from a wrapper.
1359 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
/* mempool allocation: lifetime is tied to the compilation, no free needed. */
1360 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1361 jump_info_token->image = image;
1362 jump_info_token->token = token;
1363 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1368 * This function is called to handle items that are left on the evaluation stack
1369 * at basic block boundaries. What happens is that we save the values to local variables
1370 * and we reload them later when first entering the target basic block (with the
1371 * handle_loaded_temps () function).
1372 * A single joint point will use the same variables (stored in the array bb->out_stack or
1373 * bb->in_stack, if the basic block is before or after the joint point).
1375 * This function needs to be called _before_ emitting the last instruction of
1376 * the bb (i.e. before emitting a branch).
1377 * If the stack merge fails at a join point, cfg->unverifiable is set.
1380 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1383 MonoBasicBlock *bb = cfg->cbb;
1384 MonoBasicBlock *outb;
1385 MonoInst *inst, **locals;
1390 if (cfg->verbose_level > 3)
1391 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables carry the stack.
 * Prefer reusing the in_stack of a successor so both sides agree. */
1392 if (!bb->out_scount) {
1393 bb->out_scount = count;
1394 //printf ("bblock %d has out:", bb->block_num);
1396 for (i = 0; i < bb->out_count; ++i) {
1397 outb = bb->out_bb [i];
1398 /* exception handlers are linked, but they should not be considered for stack args */
1399 if (outb->flags & BB_EXCEPTION_HANDLER)
1401 //printf (" %d", outb->block_num);
1402 if (outb->in_stack) {
1404 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh interface variables. */
1410 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1411 for (i = 0; i < count; ++i) {
1413 * try to reuse temps already allocated for this purpouse, if they occupy the same
1414 * stack slot and if they are of the same type.
1415 * This won't cause conflicts since if 'local' is used to
1416 * store one of the values in the in_stack of a bblock, then
1417 * the same variable will be used for the same outgoing stack
1419 * This doesn't work when inlining methods, since the bblocks
1420 * in the inlined methods do not inherit their in_stack from
1421 * the bblock they are inlined to. See bug #58863 for an
1424 if (cfg->inlined_method)
1425 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1427 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor that has no in_stack yet;
 * a successor with a mismatched in_scount makes the IL unverifiable. */
1432 for (i = 0; i < bb->out_count; ++i) {
1433 outb = bb->out_bb [i];
1434 /* exception handlers are linked, but they should not be considered for stack args */
1435 if (outb->flags & BB_EXCEPTION_HANDLER)
1437 if (outb->in_scount) {
1438 if (outb->in_scount != bb->out_scount) {
1439 cfg->unverifiable = TRUE;
1442 continue; /* check they are the same locals */
1444 outb->in_scount = count;
1445 outb->in_stack = bb->out_stack;
1448 locals = bb->out_stack;
/* Spill the live stack values into the shared variables, then replace the
 * stack entries with the variables themselves. */
1450 for (i = 0; i < count; ++i) {
1451 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1452 inst->cil_code = sp [i]->cil_code;
1453 sp [i] = locals [i];
1454 if (cfg->verbose_level > 3)
1455 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1459 * It is possible that the out bblocks already have in_stack assigned, and
1460 * the in_stacks differ. In this case, we will store to all the different
1467 /* Find a bblock which has a different in_stack */
1469 while (bindex < bb->out_count) {
1470 outb = bb->out_bb [bindex];
1471 /* exception handlers are linked, but they should not be considered for stack args */
1472 if (outb->flags & BB_EXCEPTION_HANDLER) {
1476 if (outb->in_stack != locals) {
1477 for (i = 0; i < count; ++i) {
1478 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1479 inst->cil_code = sp [i]->cil_code;
1480 sp [i] = locals [i];
1481 if (cfg->verbose_level > 3)
1482 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1484 locals = outb->in_stack;
1493 /* Emit code which loads interface_offsets [klass->interface_id]
1494 * The array is stored in memory before vtable.
1497 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: the interface id is not known at compile time, so load it via an
 * adjusted-IID patch constant and index the offsets table at runtime. */
1499 if (cfg->compile_aot) {
1500 int ioffset_reg = alloc_preg (cfg);
1501 int iid_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a constant, so use a negative fixed offset from the
 * vtable pointer (the offsets array precedes the vtable in memory). */
1508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets intf_bit_reg to a nonzero value iff the interface
 * bitmap located at base_reg+offset has the bit for klass's interface id
 * set. Three strategies: compressed bitmap (icall), AOT (id loaded via
 * patch constant, bit computed at runtime) and JIT (constant byte index
 * and mask).
 * NOTE(review): line-sampled extract — the #else/#endif lines pairing
 * with COMPRESSED_INTERFACE_BITMAP are not visible here.
 */
1513 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1515 int ibitmap_reg = alloc_preg (cfg);
1516 #ifdef COMPRESSED_INTERFACE_BITMAP
1518 MonoInst *res, *ins;
1519 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1520 MONO_ADD_INS (cfg->cbb, ins);
1522 if (cfg->compile_aot)
1523 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1525 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmaps are decoded by a runtime icall rather than inline IR. */
1526 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1527 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1529 int ibitmap_byte_reg = alloc_preg (cfg);
1531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 int shifted_iid_reg = alloc_preg (cfg);
1536 int ibitmap_byte_address_reg = alloc_preg (cfg);
1537 int masked_iid_reg = alloc_preg (cfg);
1538 int iid_one_bit_reg = alloc_preg (cfg);
1539 int iid_bit_reg = alloc_preg (cfg);
/* byte = bitmap[iid >> 3]; bit = 1 << (iid & 7); result = byte & bit */
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1542 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1543 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1545 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1547 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the interface id is a compile-time constant, so byte index and
 * mask fold into immediates. */
1549 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1556 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1557 * stored in "klass_reg" implements the interface "klass".
1560 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Thin wrapper: bitmap lives at MonoClass.interface_bitmap. */
1562 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1566 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1567 * stored in "vtable_reg" implements the interface "klass".
1570 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Thin wrapper: bitmap lives at MonoVTable.interface_bitmap. */
1572 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1576 * Emit code which checks whenever the interface id of @klass is smaller than
1577 * than the value given by max_iid_reg.
1580 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1581 MonoBasicBlock *false_target)
1583 if (cfg->compile_aot) {
1584 int iid_reg = alloc_preg (cfg);
1585 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1586 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target we branch on failure (isinst style); without one we
 * throw InvalidCastException (castclass style). */
1591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1593 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1596 /* Same as above, but obtains max_iid from a vtable */
1598 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1599 MonoBasicBlock *false_target)
1601 int max_iid_reg = alloc_preg (cfg);
1603 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1604 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1607 /* Same as above, but obtains max_iid from a klass */
1609 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1610 MonoBasicBlock *false_target)
1612 int max_iid_reg = alloc_preg (cfg);
1614 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1615 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test: walk klass_reg's supertypes table at
 * the depth of 'klass' and compare against klass (given either as an IR
 * value in klass_ins, an AOT class constant, or an immediate pointer).
 * Branches to true_target on match, false_target when the idepth check
 * already rules the cast out.
 * NOTE(review): line-sampled extract — some closing braces/else lines are
 * not visible here.
 */
1619 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1621 int idepth_reg = alloc_preg (cfg);
1622 int stypes_reg = alloc_preg (cfg);
1623 int stype = alloc_preg (cfg);
1625 mono_class_setup_supertypes (klass);
/* Supertypes arrays have at least MONO_DEFAULT_SUPERTABLE_SIZE entries,
 * so an explicit idepth bound check is only needed past that size. */
1627 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1628 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1635 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1636 } else if (cfg->compile_aot) {
1637 int const_reg = alloc_preg (cfg);
1638 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test without an explicit klass IR value. */
1647 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1649 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Interface test against a vtable: bound-check max_interface_id, then test
 * the interface bitmap bit; branch or throw depending on targets given. */
1653 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1655 int intf_reg = alloc_preg (cfg);
1657 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1658 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1663 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1667 * Variant of the above that takes a register to the class, not the vtable.
1670 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1672 int intf_bit_reg = alloc_preg (cfg);
1674 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1675 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1680 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Exact-class equality check: throws InvalidCastException on mismatch.
 * klass may be supplied as IR value, AOT constant or immediate. */
1684 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1687 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1688 } else if (cfg->compile_aot) {
1689 int const_reg = alloc_preg (cfg);
1690 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1695 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Wrapper without an explicit klass IR value. */
1699 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1701 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg to klass and branch with the given condition opcode
 * instead of throwing. */
1705 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1707 if (cfg->compile_aot) {
1708 int const_reg = alloc_preg (cfg);
1709 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses via this for
 * arrays of arrays. */
1718 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in klass_reg against 'klass',
 * throwing InvalidCastException on failure. Arrays are checked by rank
 * plus a recursive check of the element (cast_class), with special cases
 * for enum element types; non-arrays use the supertypes-table walk.
 * NOTE(review): line-sampled extract — the array/non-array branch headers
 * and several closing braces are missing from the view.
 */
1721 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1724 int rank_reg = alloc_preg (cfg);
1725 int eclass_reg = alloc_preg (cfg);
1727 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1730 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1731 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1732 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type special cases around enums: enums and their underlying
 * types are interchangeable for array covariance purposes. */
1733 if (klass->cast_class == mono_defaults.object_class) {
1734 int parent_reg = alloc_preg (cfg);
1735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1736 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1737 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1738 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1739 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1740 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1741 } else if (klass->cast_class == mono_defaults.enum_class) {
1742 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1743 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1744 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1746 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1747 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1750 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1751 /* Check that the object is a vector too */
1752 int bounds_reg = alloc_preg (cfg);
1753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1755 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, throwing variant of the isinst
 * check emitted by mini_emit_isninst_cast_inst (). */
1758 int idepth_reg = alloc_preg (cfg);
1759 int stypes_reg = alloc_preg (cfg);
1760 int stype = alloc_preg (cfg);
1762 mono_class_setup_supertypes (klass);
1764 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1767 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1771 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Wrapper without an explicit klass IR value. */
1776 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1778 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that fills 'size' bytes at destreg+offset with 'val'
 * (only val == 0 is supported, per the assert). Small aligned sizes use a
 * single store-immediate; otherwise a value register is materialized and
 * stores are emitted widest-first, honoring alignment and register width.
 * NOTE(review): line-sampled extract — the loop/size-decrement statements
 * between the store macros are not visible here.
 */
1782 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1786 g_assert (val == 0);
/* Fast path: size fits in one register-sized, aligned store-immediate. */
1791 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1794 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1797 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1800 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1802 #if SIZEOF_REGISTER == 8
1804 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value once, then emit a run of stores. */
1810 val_reg = alloc_preg (cfg);
1812 if (SIZEOF_REGISTER == 8)
1813 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1815 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix handled one byte at a time. */
1818 /* This could be optimized further if neccesary */
1820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1827 #if !NO_UNALIGNED_ACCESS
1828 if (SIZEOF_REGISTER == 8) {
1830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1835 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying 'size' bytes from srcreg+soffset to
 * destreg+doffset, widest-first subject to 'align' and register width.
 * The regions are assumed not to overlap (plain loads+stores, no
 * memmove-style handling).
 * NOTE(review): line-sampled extract — the size/offset bookkeeping between
 * the load/store pairs is not visible here.
 */
1860 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1867 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1868 g_assert (size < 10000);
/* Unaligned prefix: byte-at-a-time copy. */
1871 /* This could be optimized further if neccesary */
1873 cur_reg = alloc_preg (cfg);
1874 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1882 #if !NO_UNALIGNED_ACCESS
1883 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks when the target allows unaligned/wide access. */
1885 cur_reg = alloc_preg (cfg);
1886 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1896 cur_reg = alloc_preg (cfg);
1897 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1904 cur_reg = alloc_preg (cfg);
1905 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1912 cur_reg = alloc_preg (cfg);
1913 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR storing sreg1 into the TLS slot identified by tls_key. Under
 * AOT the slot offset is a patch constant resolved at load time
 * (OP_TLS_SET_REG); under the JIT it is an immediate offset (OP_TLS_SET).
 */
1922 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1926 if (cfg->compile_aot) {
1927 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1928 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1930 ins->sreg2 = c->dreg;
1931 MONO_ADD_INS (cfg->cbb, ins);
1933 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1935 ins->inst_offset = mini_get_tls_offset (tls_key);
1936 MONO_ADD_INS (cfg->cbb, ins);
1943 * Emit IR to push the current LMF onto the LMF stack.
1946 emit_push_lmf (MonoCompile *cfg)
1949 * Emit IR to push the LMF:
1950 * lmf_addr = <lmf_addr from tls>
1951 * lmf->lmf_addr = lmf_addr
1952 * lmf->prev_lmf = *lmf_addr
1955 int lmf_reg, prev_lmf_reg;
1956 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link and install it with a
 * TLS intrinsic + one store.
 * NOTE(review): line-sampled extract — several branch headers and closing
 * braces of the paths below are not visible here. */
1961 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1962 /* Load current lmf */
1963 lmf_ins = mono_get_lmf_intrinsic (cfg);
1965 MONO_ADD_INS (cfg->cbb, lmf_ins);
1966 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1967 lmf_reg = ins->dreg;
1968 /* Save previous_lmf */
1969 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1971 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1974 * Store lmf_addr in a variable, so it can be allocated to a global register.
1976 if (!cfg->lmf_addr_var)
1977 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One of several ways to obtain lmf_addr: jit_tls intrinsic + offset,
 * lmf_addr intrinsic, inlined pthread_getspecific, or an icall fallback. */
1980 ins = mono_get_jit_tls_intrinsic (cfg);
1982 int jit_tls_dreg = ins->dreg;
1984 MONO_ADD_INS (cfg->cbb, ins);
1985 lmf_reg = alloc_preg (cfg);
1986 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1988 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1991 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1993 MONO_ADD_INS (cfg->cbb, lmf_ins);
1996 MonoInst *args [16], *jit_tls_ins, *ins;
1998 /* Inline mono_get_lmf_addr () */
1999 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2001 /* Load mono_jit_tls_id */
2002 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2003 /* call pthread_getspecific () */
2004 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2005 /* lmf_addr = &jit_tls->lmf */
2006 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2009 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2013 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2015 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2016 lmf_reg = ins->dreg;
2018 prev_lmf_reg = alloc_preg (cfg);
2019 /* Save previous_lmf */
2020 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2021 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Install our LMF as the new top of the LMF stack: *lmf_addr = lmf. */
2023 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2030 * Emit IR to pop the current LMF from the LMF stack.
2033 emit_pop_lmf (MonoCompile *cfg)
2035 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2041 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2042 lmf_reg = ins->dreg;
/* Fast path mirroring emit_push_lmf (): LMF lives in TLS, so restore the
 * saved previous_lmf straight back into the TLS slot. */
2044 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2045 /* Load previous_lmf */
2046 prev_lmf_reg = alloc_preg (cfg);
2047 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2049 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2052 * Emit IR to pop the LMF:
2053 * *(lmf->lmf_addr) = lmf->prev_lmf
2055 /* This could be called before emit_push_lmf () */
2056 if (!cfg->lmf_addr_var)
2057 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2058 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2060 prev_lmf_reg = alloc_preg (cfg);
2061 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf))
2062 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * If enter/leave profiling is enabled, emit an icall to 'func' passing the
 * compiled method as the single argument. Skipped while compiling an
 * inlined callee so inlined frames don't distort profiling results.
 */
2067 emit_instrumentation_call (MonoCompile *cfg, void *func)
2069 MonoInst *iargs [1];
2072 * Avoid instrumenting inlined methods since it can
2073 * distort profiling results.
2075 if (cfg->method != cfg->current_method)
2078 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2079 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2080 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Choose the call IR opcode for a given return type: plain/void/float/
 * long/vtype variants, each in _REG (calli), _MEMBASE (virtual) or plain
 * form. Enums recurse through their base type; generic instances recurse
 * through the generic container's open type.
 * NOTE(review): line-sampled extract — byref handling, some case labels
 * and 'goto'-style re-entries are not visible here.
 */
2085 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2088 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2091 type = mini_get_basic_type_from_generic (gsctx, type);
2092 type = mini_replace_type (type);
2093 switch (type->type) {
2094 case MONO_TYPE_VOID:
2095 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2098 case MONO_TYPE_BOOLEAN:
2101 case MONO_TYPE_CHAR:
2104 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2108 case MONO_TYPE_FNPTR:
2109 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2110 case MONO_TYPE_CLASS:
2111 case MONO_TYPE_STRING:
2112 case MONO_TYPE_OBJECT:
2113 case MONO_TYPE_SZARRAY:
2114 case MONO_TYPE_ARRAY:
2115 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2118 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2121 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2122 case MONO_TYPE_VALUETYPE:
/* Enums use their underlying integral type's calling convention. */
2123 if (type->data.klass->enumtype) {
2124 type = mono_class_enum_basetype (type->data.klass);
2127 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2128 case MONO_TYPE_TYPEDBYREF:
2129 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2130 case MONO_TYPE_GENERICINST:
2131 type = &type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
/* Presumably the gsharedvt case — var/mvar returns treated as vtypes. */
2136 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2138 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2144 * target_type_is_incompatible:
2145 * @cfg: MonoCompile context
2147 * Check that the item @arg on the evaluation stack can be stored
2148 * in the target type (can be a local, or field, etc).
2149 * The cfg arg can be used to check if we need verification or just
2152 * Returns: non-0 value if arg can't be stored on a target.
2155 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2157 MonoType *simple_type;
2160 target = mini_replace_type (target);
/* Byref targets accept managed pointers (class must match) and, per the
 * visible branch, STACK_PTR values. */
2161 if (target->byref) {
2162 /* FIXME: check that the pointed to types match */
2163 if (arg->type == STACK_MP)
2164 return arg->klass != mono_class_from_mono_type (target);
2165 if (arg->type == STACK_PTR)
2170 simple_type = mono_type_get_underlying_type (target);
2171 switch (simple_type->type) {
2172 case MONO_TYPE_VOID:
2176 case MONO_TYPE_BOOLEAN:
2179 case MONO_TYPE_CHAR:
2182 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2186 /* STACK_MP is needed when setting pinned locals */
2187 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2192 case MONO_TYPE_FNPTR:
2194 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2195 * in native int. (#688008).
2197 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2200 case MONO_TYPE_CLASS:
2201 case MONO_TYPE_STRING:
2202 case MONO_TYPE_OBJECT:
2203 case MONO_TYPE_SZARRAY:
2204 case MONO_TYPE_ARRAY:
2205 if (arg->type != STACK_OBJ)
2207 /* FIXME: check type compatibility */
2211 if (arg->type != STACK_I8)
2216 if (arg->type != STACK_R8)
/* Valuetypes must match exactly by class, not just by stack kind. */
2219 case MONO_TYPE_VALUETYPE:
2220 if (arg->type != STACK_VTYPE)
2222 klass = mono_class_from_mono_type (simple_type);
2223 if (klass != arg->klass)
2226 case MONO_TYPE_TYPEDBYREF:
2227 if (arg->type != STACK_VTYPE)
2229 klass = mono_class_from_mono_type (simple_type);
2230 if (klass != arg->klass)
2233 case MONO_TYPE_GENERICINST:
2234 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2235 if (arg->type != STACK_VTYPE)
2237 klass = mono_class_from_mono_type (simple_type);
2238 if (klass != arg->klass)
2242 if (arg->type != STACK_OBJ)
2244 /* FIXME: check type compatibility */
/* VAR/MVAR only appear here under generic sharing; vtype-constrained type
 * vars behave like valuetypes, otherwise like object references. */
2248 case MONO_TYPE_MVAR:
2249 g_assert (cfg->generic_sharing_context);
2250 if (mini_type_var_is_vt (cfg, simple_type)) {
2251 if (arg->type != STACK_VTYPE)
2254 if (arg->type != STACK_OBJ)
2259 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2265 * Prepare arguments for passing to a function call.
2266 * Return a non-zero value if the arguments can't be passed to the given
2268 * The type checks are not yet complete and some conversions may need
2269 * casts on 32 or 64 bit architectures.
2271 * FIXME: implement this using target_type_is_incompatible ()
2274 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2276 MonoType *simple_type;
/* args[0] is the 'this' pointer for instance calls: object, managed
 * pointer or native pointer are all acceptable. */
2280 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2284 for (i = 0; i < sig->param_count; ++i) {
2285 if (sig->params [i]->byref) {
2286 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2290 simple_type = sig->params [i];
2291 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* Per-parameter stack-kind check; enums and generic instances loop back
 * through this switch via their underlying/container type. */
2293 switch (simple_type->type) {
2294 case MONO_TYPE_VOID:
2299 case MONO_TYPE_BOOLEAN:
2302 case MONO_TYPE_CHAR:
2305 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2311 case MONO_TYPE_FNPTR:
2312 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2315 case MONO_TYPE_CLASS:
2316 case MONO_TYPE_STRING:
2317 case MONO_TYPE_OBJECT:
2318 case MONO_TYPE_SZARRAY:
2319 case MONO_TYPE_ARRAY:
2320 if (args [i]->type != STACK_OBJ)
2325 if (args [i]->type != STACK_I8)
2330 if (args [i]->type != STACK_R8)
2333 case MONO_TYPE_VALUETYPE:
2334 if (simple_type->data.klass->enumtype) {
2335 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2338 if (args [i]->type != STACK_VTYPE)
2341 case MONO_TYPE_TYPEDBYREF:
2342 if (args [i]->type != STACK_VTYPE)
2345 case MONO_TYPE_GENERICINST:
2346 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2349 case MONO_TYPE_MVAR:
2351 if (args [i]->type != STACK_VTYPE)
2355 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): line-sampled extract — the return statements between the
 * case labels are not visible here.
 */
2363 callvirt_to_call (int opcode)
2366 case OP_CALL_MEMBASE:
2368 case OP_VOIDCALL_MEMBASE:
2370 case OP_FCALL_MEMBASE:
2372 case OP_VCALL_MEMBASE:
2374 case OP_LCALL_MEMBASE:
2377 g_assert_not_reached ();
2383 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT/interface-method argument for an interface call,
 * either from an existing IMT value (imt_arg), an AOT method constant, or
 * an immediate method pointer, and attach it to the call's outgoing
 * registers. LLVM and non-LLVM paths differ in how the register is wired
 * to the call; MONO_ARCH_IMT_REG selects the architecture's IMT register.
 * NOTE(review): line-sampled extract — several branch headers and
 * #else/#endif lines are not visible here.
 */
2385 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2389 if (COMPILE_LLVM (cfg)) {
2390 method_reg = alloc_preg (cfg);
2393 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2394 } else if (cfg->compile_aot) {
2395 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2398 MONO_INST_NEW (cfg, ins, OP_PCONST);
2399 ins->inst_p0 = method;
2400 ins->dreg = method_reg;
2401 MONO_ADD_INS (cfg->cbb, ins);
2405 call->imt_arg_reg = method_reg;
2407 #ifdef MONO_ARCH_IMT_REG
2408 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2410 /* Need this to keep the IMT arg alive */
2411 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2416 #ifdef MONO_ARCH_IMT_REG
2417 method_reg = alloc_preg (cfg);
2420 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2421 } else if (cfg->compile_aot) {
2422 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2425 MONO_INST_NEW (cfg, ins, OP_PCONST);
2426 ins->inst_p0 = method;
2427 ins->dreg = method_reg;
2428 MONO_ADD_INS (cfg->cbb, ins);
2431 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: let the backend place the arg. */
2433 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from 'mp' and fill in its target;
 * lifetime is owned by the mempool.
 */
2437 static MonoJumpInfo *
2438 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2440 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2444 ji->data.target = target;
/* Class context-used lookup, gated on generic sharing being active. */
2450 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2452 if (cfg->generic_sharing_context)
2453 return mono_class_check_context_used (klass);
/* Method counterpart of the above. */
2459 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2461 if (cfg->generic_sharing_context)
2462 return mono_method_check_context_used (method);
2468 * check_method_sharing:
2470 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2473 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2475 gboolean pass_vtable = FALSE;
2476 gboolean pass_mrgctx = FALSE;
/* vtable is needed for static or valuetype methods of generic classes
 * whose implementation may be shared across instantiations. */
2478 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2479 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2480 gboolean sharable = FALSE;
2482 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2485 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2486 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2487 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2489 sharable = sharing_enabled && context_sharable;
2493 * Pass vtable iff target method might
2494 * be shared, which means that sharing
2495 * is enabled for its class and its
2496 * context is sharable (and it's not a
2499 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Methods with their own method_inst need an mrgctx instead of (never in
 * addition to) a vtable. */
2503 if (mini_method_get_context (cmethod) &&
2504 mini_method_get_context (cmethod)->method_inst) {
2505 g_assert (!pass_vtable);
2507 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2510 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2511 MonoGenericContext *context = mini_method_get_context (cmethod);
2512 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2514 if (sharing_enabled && context_sharable)
2516 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* Out-parameters are optional; only write through non-NULL pointers. */
2521 if (out_pass_vtable)
2522 *out_pass_vtable = pass_vtable;
2523 if (out_pass_mrgctx)
2524 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the call flavour.  Handles
 * vtype returns (via OP_OUTARG_VTRETADDR), soft-float r4 argument conversion,
 * and dispatches outarg setup to the LLVM or native backend.  The caller is
 * responsible for adding the returned instruction to a bblock.
 */
2527 inline static MonoCallInst *
2528 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2529 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2533 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	/* Tail calls leave the method, so emit the leave-instrumentation before the call. */
2538 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2540 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2542 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2545 call->signature = sig;
2546 call->rgctx_reg = rgctx;
2547 sig_ret = mini_replace_type (sig->ret);
2549 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
	/* Valuetype return: route the result through cfg->vret_addr or a fresh temp. */
2552 if (mini_type_is_vtype (cfg, sig_ret)) {
2553 call->vret_var = cfg->vret_addr;
2554 //g_assert_not_reached ();
2556 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2557 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2560 temp->backend.is_pinvoke = sig->pinvoke;
2563 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2564 * address of return value to increase optimization opportunities.
2565 * Before vtype decomposition, the dreg of the call ins itself represents the
2566 * fact the call modifies the return value. After decomposition, the call will
2567 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2568 * will be transformed into an LDADDR.
2570 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2571 loada->dreg = alloc_preg (cfg);
2572 loada->inst_p0 = temp;
2573 /* We reference the call too since call->dreg could change during optimization */
2574 loada->inst_p1 = call;
2575 MONO_ADD_INS (cfg->cbb, loada);
2577 call->inst.dreg = temp->dreg;
2579 call->vret_var = loada;
2580 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2581 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2583 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2584 if (COMPILE_SOFT_FLOAT (cfg)) {
2586 * If the call has a float argument, we would need to do an r8->r4 conversion using
2587 * an icall, but that cannot be done during the call sequence since it would clobber
2588 * the call registers + the stack. So we do it before emitting the call.
2590 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2592 MonoInst *in = call->args [i];
2594 if (i >= sig->hasthis)
2595 t = sig->params [i - sig->hasthis];
2597 t = &mono_defaults.int_class->byval_arg;
2598 t = mono_type_get_underlying_type (t);
2600 if (!t->byref && t->type == MONO_TYPE_R4) {
2601 MonoInst *iargs [1];
2605 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2607 /* The result will be in an int vreg */
2608 call->args [i] = conv;
2614 call->need_unbox_trampoline = unbox_trampoline;
2617 if (COMPILE_LLVM (cfg))
2618 mono_llvm_emit_call (cfg, call);
2620 mono_arch_emit_call (cfg, call);
2622 mono_arch_emit_call (cfg, call);
	/* Track the largest outgoing-parameter area so the prolog can reserve it once. */
2625 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2626 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument in RGCTX_REG to CALL.  On targets with a
 * dedicated MONO_ARCH_RGCTX_REG the value is bound to that hard register;
 * otherwise only the vreg is recorded for the backend to place.
 */
2632 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2634 #ifdef MONO_ARCH_RGCTX_REG
2635 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2636 cfg->uses_rgctx_reg = TRUE;
2637 call->rgctx_reg = TRUE;
2639 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG.  Optionally passes
 * an IMT argument and an rgctx argument.  For pinvoke wrappers compiled with
 * --check-pinvoke-callconv, also emits a stack-pointer save/compare around the
 * call so calling-convention mismatches raise ExecutionEngineException.
 */
2646 inline static MonoInst*
2647 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2652 gboolean check_sp = FALSE;
2654 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2655 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2657 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
	/* Copy the rgctx value to a fresh vreg; the hard-reg binding happens after the call is created. */
2662 rgctx_reg = mono_alloc_preg (cfg);
2663 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2667 if (!cfg->stack_inbalance_var)
2668 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* Record SP before the call so we can detect a callee that unbalanced the stack. */
2670 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2671 ins->dreg = cfg->stack_inbalance_var->dreg;
2672 MONO_ADD_INS (cfg->cbb, ins);
2675 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2677 call->inst.sreg1 = addr->dreg;
2680 emit_imt_argument (cfg, call, NULL, imt_arg);
2682 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2687 sp_reg = mono_alloc_preg (cfg);
2689 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2691 MONO_ADD_INS (cfg->cbb, ins);
2693 /* Restore the stack so we don't crash when throwing the exception */
2694 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2695 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2696 MONO_ADD_INS (cfg->cbb, ins);
2698 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2699 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2703 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2705 return (MonoInst*)call;
2709 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2712 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2714 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly tail) call to METHOD with arguments
 * ARGS.  THIS being non-NULL selects virtual dispatch.  Handles remoting
 * wrappers, string ctor signature rewriting, delegate Invoke fast paths,
 * devirtualization of final/non-virtual methods, and vtable/IMT slot loads.
 * IMT_ARG/RGCTX_ARG, when set, are threaded through to the call instruction.
 */
2717 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2718 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2720 #ifndef DISABLE_REMOTING
2721 gboolean might_be_remote = FALSE;
2723 gboolean virtual = this != NULL;
2724 gboolean enable_for_aot = TRUE;
2728 gboolean need_unbox_trampoline;
2731 sig = mono_method_signature (method);
2734 rgctx_reg = mono_alloc_preg (cfg);
2735 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2738 if (method->string_ctor) {
2739 /* Create the real signature */
2740 /* FIXME: Cache these */
2741 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2742 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2747 context_used = mini_method_check_context_used (cfg, method);
2749 #ifndef DISABLE_REMOTING
	/* MarshalByRef (or object) non-virtual instance calls may cross a remoting boundary. */
2750 might_be_remote = this && sig->hasthis &&
2751 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2752 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2754 if (might_be_remote && context_used) {
2757 g_assert (cfg->generic_sharing_context);
2759 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2761 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
	/* Calls on object/interface receivers may need an unbox trampoline for valuetype targets. */
2765 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2767 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2769 #ifndef DISABLE_REMOTING
2770 if (might_be_remote)
2771 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2774 call->method = method;
2775 call->inst.flags |= MONO_INST_HAS_METHOD;
2776 call->inst.inst_left = this;
2777 call->tail_call = tail;
2780 int vtable_reg, slot_reg, this_reg;
2783 this_reg = this->dreg;
2785 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2786 MonoInst *dummy_use;
2788 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2790 /* Make a call to delegate->invoke_impl */
2791 call->inst.inst_basereg = this_reg;
2792 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2793 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2795 /* We must emit a dummy use here because the delegate trampoline will
2796 replace the 'this' argument with the delegate target making this activation
2797 no longer a root for the delegate.
2798 This is an issue for delegates that target collectible code such as dynamic
2799 methods of GC'able assemblies.
2801 For a test case look into #667921.
2803 FIXME: a dummy use is not the best way to do it as the local register allocator
2804 will put it on a caller save register and spil it around the call.
2805 Ideally, we would either put it on a callee save register or only do the store part.
2807 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2809 return (MonoInst*)call;
	/* Devirtualize: non-virtual or final methods only need a null check on 'this'. */
2812 if ((!cfg->compile_aot || enable_for_aot) &&
2813 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2814 (MONO_METHOD_IS_FINAL (method) &&
2815 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2816 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2818 * the method is not virtual, we just need to ensure this is not null
2819 * and then we can call the method directly.
2821 #ifndef DISABLE_REMOTING
2822 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2824 * The check above ensures method is not gshared, this is needed since
2825 * gshared methods can't have wrappers.
2827 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2831 if (!method->string_ctor)
2832 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2834 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2835 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2837 * the method is virtual, but we can statically dispatch since either
2838 * it's class or the method itself are sealed.
2839 * But first we need to ensure it's not a null reference.
2841 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2843 call->inst.opcode = callvirt_to_call (call->inst.opcode);
	/* True virtual dispatch: load the vtable, then an IMT or vtable slot. */
2845 vtable_reg = alloc_preg (cfg);
2846 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2847 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2850 guint32 imt_slot = mono_method_get_imt_slot (method);
2851 emit_imt_argument (cfg, call, call->method, imt_arg);
2852 slot_reg = vtable_reg;
	/* IMT slots live just below the vtable, hence the negative offset. */
2853 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2855 if (slot_reg == -1) {
2856 slot_reg = alloc_preg (cfg);
2857 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2858 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2861 slot_reg = vtable_reg;
2862 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2863 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2865 g_assert (mono_method_signature (method)->generic_param_count);
2866 emit_imt_argument (cfg, call, call->method, imt_arg);
2870 call->inst.sreg1 = slot_reg;
2871 call->inst.inst_offset = offset;
2872 call->virtual = TRUE;
2876 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2879 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2881 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full () for the common
 * non-tail call with no IMT or rgctx argument.
 */
2885 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2887 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): binding of FUNC to the call (fptr) is in elided lines of this
 * listing — confirm against the full source.
 */
2891 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2898 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2901 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2903 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its address FUNC,
 * going through the icall's wrapper and signature.
 */
2907 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2909 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2913 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2917 * mono_emit_abs_call:
2919 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is used as the call address; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can map it back to the
 * real target at code-emission time.
 */
2921 inline static MonoInst*
2922 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2923 MonoMethodSignature *sig, MonoInst **args)
2925 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2929 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2932 if (cfg->abs_patches == NULL)
2933 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2934 g_hash_table_insert (cfg->abs_patches, ji, ji);
2935 ins = mono_emit_native_call (cfg, ji, sig, args);
2936 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend a small-integer call result INS when the callee (pinvoke
 * or LLVM-compiled) may leave the upper bits of the register undefined.
 * Returns the (possibly widened) result instruction.
 */
2941 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2943 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2944 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2948 * Native code might return non register sized integers
2949 * without initializing the upper bits.
2951 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2952 case OP_LOADI1_MEMBASE:
2953 widen_op = OP_ICONV_TO_I1;
2955 case OP_LOADU1_MEMBASE:
2956 widen_op = OP_ICONV_TO_U1;
2958 case OP_LOADI2_MEMBASE:
2959 widen_op = OP_ICONV_TO_I2;
2961 case OP_LOADU2_MEMBASE:
2962 widen_op = OP_ICONV_TO_U2;
2968 if (widen_op != -1) {
2969 int dreg = alloc_preg (cfg);
2972 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2973 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the cached corlib String.memcpy helper (3 args), looking it up
 * once.  Aborts with g_error if the corlib is too old to provide it.
 */
2983 get_memcpy_method (void)
2985 static MonoMethod *memcpy_method = NULL;
2986 if (!memcpy_method) {
2987 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2989 g_error ("Old corlib found. Install a new one");
2991 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set a bit in *WB_BITMAP for every pointer-sized slot of KLASS
 * (at base OFFSET, in words) that holds a managed reference.  Static fields
 * are skipped; nested valuetypes with references recurse.
 */
2995 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2997 MonoClassField *field;
2998 gpointer iter = NULL;
3000 while ((field = mono_class_get_fields (klass, &iter))) {
3003 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
	/* Field offsets of reference types include the MonoObject header; valuetypes don't. */
3005 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3006 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3007 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3008 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3010 MonoClass *field_class = mono_class_from_mono_type (field->type);
3011 if (field_class->has_references)
3012 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR.  Prefers the
 * architecture's inline card-table barrier, falls back to inline card-table
 * marking, then to a call to the generic write-barrier method.  No-op when
 * write barriers are disabled for this compile.
 */
3018 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3020 int card_table_shift_bits;
3021 gpointer card_table_mask;
3023 MonoInst *dummy_use;
3024 int nursery_shift_bits;
3025 size_t nursery_size;
3026 gboolean has_card_table_wb = FALSE;
3028 if (!cfg->gen_write_barriers)
3031 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3033 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3035 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3036 has_card_table_wb = TRUE;
3039 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3042 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3043 wbarrier->sreg1 = ptr->dreg;
3044 wbarrier->sreg2 = value->dreg;
3045 MONO_ADD_INS (cfg->cbb, wbarrier);
3046 } else if (card_table) {
3047 int offset_reg = alloc_preg (cfg);
3048 int card_reg = alloc_preg (cfg);
	/* card index = (ptr >> shift) & mask; the card byte is then set to 1. */
3051 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3052 if (card_table_mask)
3053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3055 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3056 * IMM's larger than 32bits.
3058 if (cfg->compile_aot) {
3059 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3061 MONO_INST_NEW (cfg, ins, OP_PCONST);
3062 ins->inst_p0 = card_table;
3063 ins->dreg = card_reg;
3064 MONO_ADD_INS (cfg->cbb, ins);
3067 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3068 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3070 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3071 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	/* Keep VALUE alive across the barrier so the GC still sees it as a root. */
3074 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled valuetype copy of SIZE bytes that issues write
 * barriers only for the reference-holding slots (per a bitmap computed from
 * KLASS).  Large copies are delegated to the mono_gc_wbarrier_value_copy_bitmap
 * icall.  iargs[0]/iargs[1] hold dest/src addresses on entry.
 */
3078 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3080 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3081 unsigned need_wb = 0;
3086 /*types with references can't have alignment smaller than sizeof(void*) */
3087 if (align < SIZEOF_VOID_P)
3090 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3091 if (size > 32 * SIZEOF_VOID_P)
3094 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3096 /* We don't unroll more than 5 stores to avoid code bloat. */
3097 if (size > 5 * SIZEOF_VOID_P) {
3098 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3099 size += (SIZEOF_VOID_P - 1);
3100 size &= ~(SIZEOF_VOID_P - 1);
3102 EMIT_NEW_ICONST (cfg, iargs [2], size);
3103 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3104 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3108 destreg = iargs [0]->dreg;
3109 srcreg = iargs [1]->dreg;
3112 dest_ptr_reg = alloc_preg (cfg);
3113 tmp_reg = alloc_preg (cfg);
	/* dest_ptr walks forward while destreg stays fixed for the sub-word tail copies. */
3116 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3118 while (size >= SIZEOF_VOID_P) {
3119 MonoInst *load_inst;
3120 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3121 load_inst->dreg = tmp_reg;
3122 load_inst->inst_basereg = srcreg;
3123 load_inst->inst_offset = offset;
3124 MONO_ADD_INS (cfg->cbb, load_inst);
3126 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
	/* Only slots flagged in the bitmap get a barrier (elided bitmap test in this listing). */
3129 emit_write_barrier (cfg, iargs [0], load_inst);
3131 offset += SIZEOF_VOID_P;
3132 size -= SIZEOF_VOID_P;
3135 /*tmp += sizeof (void*)*/
3136 if (size >= SIZEOF_VOID_P) {
3137 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3138 MONO_ADD_INS (cfg->cbb, iargs [0]);
3142 /* Those cannot be references since size < sizeof (void*) */
3144 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3151 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3158 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3159 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 */
3168 * Emit code to copy a valuetype of type @klass whose address is stored in
3169 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native layout/size.  Gsharedvt classes get their size and
 * memcpy helper from the runtime generic context; reference-containing types
 * go through write-barrier-aware paths unless the store targets the stack.
 */
3172 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3174 MonoInst *iargs [4];
3175 int context_used, n;
3177 MonoMethod *memcpy_method;
3178 MonoInst *size_ins = NULL;
3179 MonoInst *memcpy_ins = NULL;
3183 * This check breaks with spilled vars... need to handle it during verification anyway.
3184 * g_assert (klass && klass == src->klass && klass == dest->klass);
3187 if (mini_is_gsharedvt_klass (cfg, klass)) {
3189 context_used = mini_class_check_context_used (cfg, klass);
3190 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3191 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3195 n = mono_class_native_size (klass, &align);
3197 n = mono_class_value_size (klass, &align);
3199 /* if native is true there should be no references in the struct */
3200 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3201 /* Avoid barriers when storing to the stack */
3202 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3203 (dest->opcode == OP_LDADDR))) {
3209 context_used = mini_class_check_context_used (cfg, klass);
3211 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3212 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3214 } else if (context_used) {
3215 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3217 if (cfg->compile_aot) {
3218 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3220 EMIT_NEW_PCONST (cfg, iargs [2], klass);
	/* mono_value_copy consults the GC descriptor, so make sure it is computed. */
3221 mono_class_compute_gc_descriptor (klass);
3226 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3228 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
	/* Reference-free path: small copies are inlined, the rest call memcpy. */
3233 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3234 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3235 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3240 iargs [2] = size_ins;
3242 EMIT_NEW_ICONST (cfg, iargs [2], n);
3244 memcpy_method = get_memcpy_method ();
3246 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3248 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the cached corlib String.memset helper (3 args), looking it up
 * once.  Aborts with g_error if the corlib is too old to provide it.
 */
3253 get_memset_method (void)
3255 static MonoMethod *memset_method = NULL;
3256 if (!memset_method) {
3257 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3259 g_error ("Old corlib found. Install a new one");
3261 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at address
 * DEST->dreg.  Gsharedvt classes call a runtime bzero helper with a size
 * loaded from the rgctx; small known-size types are inlined via
 * mini_emit_memset, the rest call the corlib memset helper.
 */
3265 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3267 MonoInst *iargs [3];
3268 int n, context_used;
3270 MonoMethod *memset_method;
3271 MonoInst *size_ins = NULL;
3272 MonoInst *bzero_ins = NULL;
3273 static MonoMethod *bzero_method;
3275 /* FIXME: Optimize this for the case when dest is an LDADDR */
3277 mono_class_init (klass);
3278 if (mini_is_gsharedvt_klass (cfg, klass)) {
3279 context_used = mini_class_check_context_used (cfg, klass);
3280 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3281 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3283 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3284 g_assert (bzero_method);
3286 iargs [1] = size_ins;
3287 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3291 n = mono_class_value_size (klass, &align);
3293 if (n <= sizeof (gpointer) * 5) {
3294 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3297 memset_method = get_memset_method ();
3299 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3300 EMIT_NEW_ICONST (cfg, iargs [2], n);
3301 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  Depending on
 * CONTEXT_USED this is the mrgctx argument, the vtable argument, or the
 * vtable loaded from 'this'.  Only valid under generic sharing.
 */
3306 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3308 MonoInst *this = NULL;
3310 g_assert (cfg->generic_sharing_context);
	/* Instance methods of reference types can recover the vtable from 'this'. */
3312 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3313 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3314 !method->klass->valuetype)
3315 EMIT_NEW_ARGLOAD (cfg, this, 0);
3317 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3318 MonoInst *mrgctx_loc, *mrgctx_var;
3321 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3323 mrgctx_loc = mono_get_vtable_var (cfg);
3324 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3327 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3328 MonoInst *vtable_loc, *vtable_var;
3332 vtable_loc = mono_get_vtable_var (cfg);
3333 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
	/* If we actually received an mrgctx, the vtable is a field inside it. */
3335 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3336 MonoInst *mrgctx_var = vtable_var;
3339 vtable_reg = alloc_preg (cfg);
3340 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3341 vtable_var->type = STACK_PTR;
3349 vtable_reg = alloc_preg (cfg);
3350 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry descriptor for a lazy rgctx fetch:
 * METHOD + IN_MRGCTX locate the context, the embedded MonoJumpInfo
 * (PATCH_TYPE/PATCH_DATA) identifies the data, INFO_TYPE the slot kind.
 */
3355 static MonoJumpInfoRgctxEntry *
3356 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3358 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3359 res->method = method;
3360 res->in_mrgctx = in_mrgctx;
3361 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3362 res->data->type = patch_type;
3363 res->data->data.target = patch_data;
3364 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY using
 * the rgctx value RGCTX, returning the fetched data as the call result.
 */
3369 static inline MonoInst*
3370 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3372 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of KLASS from the runtime generic
 * context of the current method.
 */
3376 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3377 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3379 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3380 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3382 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of signature SIG from the runtime
 * generic context of the current method.
 */
3386 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3387 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3389 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3390 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3392 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch gsharedvt call info (SIG + CMETHOD pair) of kind
 * RGCTX_TYPE from the runtime generic context of the current method.
 */
3396 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3397 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3399 MonoJumpInfoGSharedVtCall *call_info;
3400 MonoJumpInfoRgctxEntry *entry;
3403 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3404 call_info->sig = sig;
3405 call_info->method = cmethod;
3407 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3408 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3410 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt per-method INFO blob of CMETHOD from the
 * runtime generic context of the current method.
 */
3415 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3416 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3418 MonoJumpInfoRgctxEntry *entry;
3421 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3422 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3424 return emit_rgctx_fetch (cfg, rgctx, entry);
3428 * emit_get_rgctx_method:
3430 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3431 * normal constants, else emit a load from the rgctx.
3434 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3435 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3437 if (!context_used) {
	/* No generic context involved: the method is a compile-time constant. */
3440 switch (rgctx_type) {
3441 case MONO_RGCTX_INFO_METHOD:
3442 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3444 case MONO_RGCTX_INFO_METHOD_RGCTX:
3445 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3448 g_assert_not_reached ();
3451 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3452 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3454 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method.
 */
3459 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3460 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3462 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3463 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3465 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in cfg->gsharedvt_info for (DATA, RGCTX_TYPE),
 * reusing an existing entry when possible (except LOCAL_OFFSET entries,
 * which are never deduplicated).  Grows the entry array as needed.
 */
3469 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3471 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3472 MonoRuntimeGenericContextInfoTemplate *template;
3477 for (i = 0; i < info->num_entries; ++i) {
3478 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3480 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
	/* Amortized growth: double the capacity (starting from 16) when full. */
3484 if (info->num_entries == info->count_entries) {
3485 MonoRuntimeGenericContextInfoTemplate *new_entries;
3486 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3488 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3490 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3491 info->entries = new_entries;
3492 info->count_entries = new_count_entries;
3495 idx = info->num_entries;
3496 template = &info->entries [idx];
3497 template->info_type = rgctx_type;
3498 template->data = data;
3500 info->num_entries ++;
3506 * emit_get_gsharedvt_info:
3508 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3511 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3516 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3517 /* Load info->entries [idx] */
3518 dreg = alloc_preg (cfg);
3519 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)))  ;
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Class-flavoured wrapper: fetch RGCTX_TYPE info for KLASS (keyed by its
 * byval MonoType) from the gsharedvt info var.
 */
3525 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3527 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, obtaining
 * the vtable either from the rgctx (shared code) or as a constant.
 */
3531 * On return the caller must check @klass for load errors.
3534 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3536 MonoInst *vtable_arg;
3540 context_used = mini_class_check_context_used (cfg, klass);
3543 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3544 klass, MONO_RGCTX_INFO_VTABLE);
3546 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3550 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3553 if (COMPILE_LLVM (cfg))
3554 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3556 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3557 #ifdef MONO_ARCH_VTABLE_REG
	/* The trampoline expects the vtable in the dedicated hard register. */
3558 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3559 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP, but only for the method
 * being compiled and only when sequence-point generation is enabled.
 * NONEMPTY_STACK marks points where the IL evaluation stack is not empty.
 */
3566 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3570 if (cfg->gen_seq_points && cfg->method == method) {
3571 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3573 ins->flags |= MONO_INST_NONEMPTY_STACK;
3574 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   Under --debug=casts, stash the source class (from OBJ_REG's vtable) and
 * the target KLASS into the thread's MonoJitTlsData so a failing cast can
 * report both types.  With NULL_CHECK, the store is skipped for null objects
 * via a branch to a fresh bblock, returned through OUT_BBLOCK.
 */
3579 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3581 if (mini_get_debug_options ()->better_cast_details) {
3582 int to_klass_reg = alloc_preg (cfg);
3583 int vtable_reg = alloc_preg (cfg);
3584 int klass_reg = alloc_preg (cfg);
3585 MonoBasicBlock *is_null_bb = NULL;
3589 NEW_BBLOCK (cfg, is_null_bb);
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3595 tls_get = mono_get_jit_tls_intrinsic (cfg);
3597 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3601 MONO_ADD_INS (cfg->cbb, tls_get);
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3605 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3606 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3607 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3610 MONO_START_BB (cfg, is_null_bb);
3612 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Undo save_cast_details () after a cast succeeded, so stale details are
 * not reported for a later unrelated cast failure.
 */
3618 reset_cast_details (MonoCompile *cfg)
3620 /* Reset the variables holding the cast details */
3621 if (mini_get_debug_options ()->better_cast_details) {
3622 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3624 MONO_ADD_INS (cfg->cbb, tls_get);
3625 /* It is enough to reset the from field */
3626 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3631  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that verifies OBJ is an instance of exactly ARRAY_CLASS (used for
 * the stelem covariance check), throwing ArrayTypeMismatchException on
 * mismatch. The comparison strategy depends on the compilation mode:
 * MONO_OPT_SHARED compares classes, shared-generic code fetches the vtable
 * from the RGCTX, AOT uses a vtable patch constant, otherwise the vtable
 * pointer is compared against an immediate.
 */
3634 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3636 int vtable_reg = alloc_preg (cfg);
3639 context_used = mini_class_check_context_used (cfg, array_class);
3641 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also performs the implicit null check on obj. */
3643 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3645 if (cfg->opt & MONO_OPT_SHARED) {
3646 int class_reg = alloc_preg (cfg);
3647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3648 if (cfg->compile_aot) {
3649 int klass_reg = alloc_preg (cfg);
3650 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3655 } else if (context_used) {
3656 MonoInst *vtable_ins;
3658 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3659 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3661 if (cfg->compile_aot) {
3665 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3667 vt_reg = alloc_preg (cfg);
3668 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3669 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3672 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3678 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3680 reset_cast_details (cfg);
3684  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3685  * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Unbox VAL into a Nullable<T> by calling the klass's generated Unbox ()
 * method. Shared code (context_used != 0) performs an indirect call through
 * an RGCTX-resolved method address; otherwise a direct call is emitted,
 * possibly passing the vtable when method sharing requires it.
 */
3688 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3690 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3693 MonoInst *rgctx, *addr;
3695 /* FIXME: What if the class is shared? We might not
3696    have to get the address of the method from the */
3698 addr = emit_get_rgctx_method (cfg, context_used, method,
3699 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3701 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3703 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3705 gboolean pass_vtable, pass_mrgctx;
3706 MonoInst *rgctx_arg = NULL;
3708 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3709 g_assert (!pass_mrgctx);
3712 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3715 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3718 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' opcode: verify sp [0] is a boxed instance of
 * KLASS (checking rank == 0 and the element class, via RGCTX in shared code)
 * and produce a managed pointer to the value payload, i.e.
 * obj + sizeof (MonoObject).
 */
3723 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3727 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3728 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3729 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3730 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3732 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3733 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3736 /* FIXME: generics */
3737 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed. */
3740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3741 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3743 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3744 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3747 MonoInst *element_class;
3749 /* This assertion is from the unboxcast insn */
3750 g_assert (klass->rank == 0);
/* Shared code: compare against the RGCTX-resolved element class. */
3752 element_class = emit_get_rgctx_klass (cfg, context_used,
3753 klass->element_class, MONO_RGCTX_INFO_KLASS);
3755 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3756 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3758 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3759 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3760 reset_cast_details (cfg);
/* Result: managed pointer past the object header. */
3763 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3764 MONO_ADD_INS (cfg->cbb, add);
3765 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete kind (vtype,
 * reference, or Nullable<T>) is only known at run time. The
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE info value selects between three paths
 * (vtype / is_ref / is_nullable); all paths leave the address of the value
 * in addr_reg and the merged result is loaded at the end. *OUT_CBB receives
 * the final basic block.
 */
3772 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3774 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3775 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3779 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3785 args [1] = klass_inst;
/* Runtime castclass check for the unbox (throws on mismatch). */
3788 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3790 NEW_BBLOCK (cfg, is_ref_bb);
3791 NEW_BBLOCK (cfg, is_nullable_bb);
3792 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box kind: 1 == reference, 2 == nullable. */
3793 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3795 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3800 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3801 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fallthrough path: value lives right after the object header. */
3805 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3806 MONO_ADD_INS (cfg->cbb, addr);
3808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3811 MONO_START_BB (cfg, is_ref_bb);
3813 /* Save the ref to a temporary */
3814 dreg = alloc_ireg (cfg);
3815 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3816 addr->dreg = addr_reg;
3817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3821 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox through an RGCTX-resolved address
 * with a hand-built signature (the method cannot be constructed at JIT time). */
3824 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3825 MonoInst *unbox_call;
3826 MonoMethodSignature *unbox_sig;
3829 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3831 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3832 unbox_sig->ret = &klass->byval_arg;
3833 unbox_sig->param_count = 1;
3834 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3835 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3837 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3838 addr->dreg = addr_reg;
3841 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3844 MONO_START_BB (cfg, end_bb);
/* All paths left the value address in addr_reg; load the result. */
3847 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3849 *out_cbb = cfg->cbb;
3855  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS (FOR_BOX distinguishes box
 * allocations, which can use a specialized managed allocator). The strategy
 * depends on the mode: shared generic code resolves the klass/vtable via the
 * RGCTX; MONO_OPT_SHARED calls mono_object_new with an explicit domain;
 * out-of-line corlib AOT code uses a token-based helper to avoid relocations;
 * otherwise a managed allocator or the class's allocation function is used.
 */
3858 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3860 MonoInst *iargs [2];
3866 MonoInst *iargs [2];
3868 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3870 if (cfg->opt & MONO_OPT_SHARED)
3871 rgctx_info = MONO_RGCTX_INFO_KLASS;
3873 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3874 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3876 if (cfg->opt & MONO_OPT_SHARED) {
3877 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3879 alloc_ftn = mono_object_new;
3882 alloc_ftn = mono_object_new_specific;
3885 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3886 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3888 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generic paths below. */
3891 if (cfg->opt & MONO_OPT_SHARED) {
3892 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3893 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3895 alloc_ftn = mono_object_new;
3896 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3897 /* This happens often in argument checking code, eg. throw new FooException... */
3898 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3899 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3900 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3902 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3903 MonoMethod *managed_alloc = NULL;
/* Vtable lookup failed: surface a type load error to the caller. */
3907 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3908 cfg->exception_ptr = klass;
3912 #ifndef MONO_CROSS_COMPILE
3913 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3916 if (managed_alloc) {
3917 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3918 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3920 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator wants the instance size in gpointer words. */
3922 guint32 lw = vtable->klass->instance_size;
3923 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3924 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3925 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3928 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3932 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3936  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR for the 'box' opcode applied to VAL of type KLASS. Nullable<T>
 * is boxed through the klass's Box () method; gsharedvt types dispatch at
 * run time on the box kind (vtype / reference / nullable); the common case
 * allocates an object and stores the vtype payload into it. *OUT_CBB
 * receives the final basic block.
 */
3939 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3941 MonoInst *alloc, *ins;
3943 *out_cbb = cfg->cbb;
3945 if (mono_class_is_nullable (klass)) {
3946 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3949 /* FIXME: What if the class is shared? We might not
3950    have to get the method address from the RGCTX. */
3951 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3952 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3953 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3955 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3957 gboolean pass_vtable, pass_mrgctx;
3958 MonoInst *rgctx_arg = NULL;
3960 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3961 g_assert (!pass_mrgctx);
3964 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3967 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3970 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3974 if (mini_is_gsharedvt_klass (cfg, klass)) {
3975 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3976 MonoInst *res, *is_ref, *src_var, *addr;
3979 dreg = alloc_ireg (cfg);
3981 NEW_BBLOCK (cfg, is_ref_bb);
3982 NEW_BBLOCK (cfg, is_nullable_bb);
3983 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on box kind: 1 == reference, 2 == nullable. */
3984 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3985 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and copy the payload past the object header. */
3992 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3995 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3996 ins->opcode = OP_STOREV_MEMBASE;
3998 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3999 res->type = STACK_OBJ;
4001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4004 MONO_START_BB (cfg, is_ref_bb);
4005 addr_reg = alloc_ireg (cfg);
4007 /* val is a vtype, so has to load the value manually */
4008 src_var = get_vreg_to_inst (cfg, val->dreg);
4010 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4011 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4012 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4013 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4016 MONO_START_BB (cfg, is_nullable_bb);
4019 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4020 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4022 MonoMethodSignature *box_sig;
4025 	 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4026 	 * construct that method at JIT time, so have to do things by hand.
4028 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4029 box_sig->ret = &mono_defaults.object_class->byval_arg;
4030 box_sig->param_count = 1;
4031 box_sig->params [0] = &klass->byval_arg;
4032 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4033 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4034 res->type = STACK_OBJ;
4038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4040 MONO_START_BB (cfg, end_bb);
4042 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt, non-nullable) box. */
4046 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4050 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic type) with at least one covariant/contravariant type argument that
 * is a reference type — such casts need the full variance-aware cast path.
 */
4057 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4060 MonoGenericContainer *container;
4061 MonoGenericInst *ginst;
4063 if (klass->generic_class) {
4064 container = klass->generic_class->container_class->generic_container;
4065 ginst = klass->generic_class->context.class_inst;
4066 } else if (klass->generic_container && context_used) {
4067 container = klass->generic_container;
4068 ginst = container->context.class_inst;
4073 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters can make the cast variance-sensitive. */
4075 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4077 type = ginst->type_argv [i];
4078 if (mini_type_is_reference (cfg, type))
4084 // FIXME: This doesn't work yet (class libs tests fail?)
/* is_complex_isinst: TRUE when an isinst/castclass against KLASS cannot use
 * the simple inline check (interfaces, arrays, nullables, MBR, sealed types,
 * type variables). The leading TRUE currently forces the complex path for
 * every class — see the FIXME above. */
4085 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshalling wrapper with ARGS
 * (obj, klass, cache slot), recording cast details around the call for
 * --debug=casts. OUT_BBLOCK, when non-NULL, receives the current bblock.
 */
4088 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4090 MonoMethod *mono_castclass;
4093 mono_castclass = mono_marshal_get_castclass_with_cache ();
4095 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4096 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4097 reset_cast_details (cfg);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: builds the (obj, klass, cache) argument triple
 * itself. Under AOT the cache slot is a CASTCLASS_CACHE patch keyed by a
 * per-call-site index; otherwise a domain-allocated pointer-sized slot.
 */
4103 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4112 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4115 if (cfg->compile_aot) {
4116 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4117 cfg->castclass_cache_index ++;
4118 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4119 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4121 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4124 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4126 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4130  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode on SRC against KLASS. Complex cases
 * (variant generic arguments, and — per is_complex_isinst — currently every
 * class) go through the cached castclass wrapper; otherwise an inline
 * null-check + vtable/klass comparison is emitted, throwing
 * InvalidCastException on mismatch.
 */
4133 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4135 MonoBasicBlock *is_null_bb;
4136 int obj_reg = src->dreg;
4137 int vtable_reg = alloc_preg (cfg);
4138 MonoInst *klass_inst = NULL;
4143 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4144 MonoInst *cache_ins;
4146 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4151 /* klass - it's the second element of the cache entry*/
4152 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4155 args [2] = cache_ins;
4157 return emit_castclass_with_cache (cfg, klass, args, NULL);
4160 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* A null reference always passes castclass. */
4163 NEW_BBLOCK (cfg, is_null_bb);
4165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4168 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4170 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4171 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4172 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4174 int klass_reg = alloc_preg (cfg);
4176 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit an exact vtable/klass comparison. */
4178 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4179 /* the remoting code is broken, access the class for now */
4180 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4181 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4183 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4184 cfg->exception_ptr = klass;
4187 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4189 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4192 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4195 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4199 MONO_START_BB (cfg, is_null_bb);
4201 reset_cast_details (cfg);
4207  * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' opcode on SRC against KLASS: the result register
 * holds the object on success and NULL on failure. Complex cases go through
 * the cached isinst wrapper; otherwise inline checks are emitted with three
 * join blocks: is_null_bb (success — keeps the object already copied into
 * res_reg), false_bb (stores NULL), end_bb.
 */
4210 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4213 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4214 int obj_reg = src->dreg;
4215 int vtable_reg = alloc_preg (cfg);
4216 int res_reg = alloc_ireg_ref (cfg);
4217 MonoInst *klass_inst = NULL;
4222 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4223 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4224 MonoInst *cache_ins;
4226 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4231 /* klass - it's the second element of the cache entry*/
4232 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4235 args [2] = cache_ins;
4237 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4240 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4243 NEW_BBLOCK (cfg, is_null_bb);
4244 NEW_BBLOCK (cfg, false_bb);
4245 NEW_BBLOCK (cfg, end_bb);
4247 /* Do the assignment at the beginning, so the other assignment can be if converted */
4248 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4249 ins->type = STACK_OBJ;
/* isinst on null yields null (which is the "success" join here). */
4252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4257 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4258 g_assert (!context_used);
4259 /* the is_null_bb target simply copies the input register to the output */
4260 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4262 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then the element (cast) class is checked,
 * with special handling for object/enum element types per ECMA array
 * covariance rules. */
4265 int rank_reg = alloc_preg (cfg);
4266 int eclass_reg = alloc_preg (cfg);
4268 g_assert (!context_used);
4269 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4270 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4271 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4274 if (klass->cast_class == mono_defaults.object_class) {
4275 int parent_reg = alloc_preg (cfg);
4276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4277 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4278 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4279 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4280 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4281 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4282 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4283 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4284 } else if (klass->cast_class == mono_defaults.enum_class) {
4285 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4287 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4288 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4290 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4291 /* Check that the object is a vector too */
4292 int bounds_reg = alloc_preg (cfg);
4293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4295 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4298 /* the is_null_bb target simply copies the input register to the output */
4299 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4301 } else if (mono_class_is_nullable (klass)) {
4302 g_assert (!context_used);
4303 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4304 /* the is_null_bb target simply copies the input register to the output */
4305 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4307 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4308 g_assert (!context_used);
4309 /* the remoting code is broken, access the class for now */
4310 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4311 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4313 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4314 cfg->exception_ptr = klass;
4317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4319 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4323 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4326 /* the is_null_bb target simply copies the input register to the output */
4327 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is NULL. */
4332 MONO_START_BB (cfg, false_bb);
4334 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4335 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4337 MONO_START_BB (cfg, is_null_bb);
4339 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal OP_CISINST opcode (remoting-aware isinst), see
 * the result-code comment below. With remoting enabled, transparent proxies
 * whose type info is unavailable produce result 2 so the caller can fall
 * back to a runtime check.
 */
4345 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4347 /* This opcode takes as input an object reference and a class, and returns:
4348 0) if the object is an instance of the class,
4349 1) if the object is not instance of the class,
4350 2) if the object is a proxy whose type cannot be determined */
4353 #ifndef DISABLE_REMOTING
4354 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4356 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4358 int obj_reg = src->dreg;
4359 int dreg = alloc_ireg (cfg);
4361 #ifndef DISABLE_REMOTING
4362 int klass_reg = alloc_preg (cfg);
4365 NEW_BBLOCK (cfg, true_bb);
4366 NEW_BBLOCK (cfg, false_bb);
4367 NEW_BBLOCK (cfg, end_bb);
4368 #ifndef DISABLE_REMOTING
4369 NEW_BBLOCK (cfg, false2_bb);
4370 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
4373 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4376 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4377 #ifndef DISABLE_REMOTING
4378 NEW_BBLOCK (cfg, interface_fail_bb);
4381 tmp_reg = alloc_preg (cfg);
4382 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4383 #ifndef DISABLE_REMOTING
/* Interface check failed: maybe it's a transparent proxy with custom
 * type info that cannot be resolved statically (result 2). */
4384 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4385 MONO_START_BB (cfg, interface_fail_bb);
4386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4388 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4390 tmp_reg = alloc_preg (cfg);
4391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4393 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4395 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4398 #ifndef DISABLE_REMOTING
4399 tmp_reg = alloc_preg (cfg);
4400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4403 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the remote class's proxy_class instead. */
4404 tmp_reg = alloc_preg (cfg);
4405 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4408 tmp_reg = alloc_preg (cfg);
4409 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4411 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4413 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4414 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4416 MONO_START_BB (cfg, no_proxy_bb);
4418 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4420 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the result codes (0 / 1 / 2) and join. */
4424 MONO_START_BB (cfg, false_bb);
4426 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4429 #ifndef DISABLE_REMOTING
4430 MONO_START_BB (cfg, false2_bb);
4432 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4436 MONO_START_BB (cfg, true_bb);
4438 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4440 MONO_START_BB (cfg, end_bb);
4443 MONO_INST_NEW (cfg, ins, OP_ICONST);
4445 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal OP_CCASTCLASS opcode (remoting-aware
 * castclass), see the result-code comment below. Unlike handle_cisinst,
 * a definite type mismatch throws InvalidCastException instead of
 * returning a failure code.
 */
4451 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4453 /* This opcode takes as input an object reference and a class, and returns:
4454 0) if the object is an instance of the class,
4455 1) if the object is a proxy whose type cannot be determined
4456 an InvalidCastException exception is thrown otherwhise*/
4459 #ifndef DISABLE_REMOTING
4460 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4462 MonoBasicBlock *ok_result_bb;
4464 int obj_reg = src->dreg;
4465 int dreg = alloc_ireg (cfg);
4466 int tmp_reg = alloc_preg (cfg);
4468 #ifndef DISABLE_REMOTING
4469 int klass_reg = alloc_preg (cfg);
4470 NEW_BBLOCK (cfg, end_bb);
4473 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes castclass (result 0). */
4475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4478 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4480 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4481 #ifndef DISABLE_REMOTING
4482 NEW_BBLOCK (cfg, interface_fail_bb);
4484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4485 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4486 MONO_START_BB (cfg, interface_fail_bb);
4487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may still pass; anything else is a cast error. */
4489 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4491 tmp_reg = alloc_preg (cfg);
4492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4494 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer to runtime (result 1). */
4496 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4500 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4501 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4504 #ifndef DISABLE_REMOTING
4505 NEW_BBLOCK (cfg, no_proxy_bb);
4507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4509 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the remote class's proxy_class. */
4511 tmp_reg = alloc_preg (cfg);
4512 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4513 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4515 tmp_reg = alloc_preg (cfg);
4516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4520 NEW_BBLOCK (cfg, fail_1_bb);
4522 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4524 MONO_START_BB (cfg, fail_1_bb);
4526 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4529 MONO_START_BB (cfg, no_proxy_bb);
4531 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4533 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4537 MONO_START_BB (cfg, ok_result_bb);
4539 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4541 #ifndef DISABLE_REMOTING
4542 MONO_START_BB (cfg, end_bb);
4546 MONO_INST_NEW (cfg, ins, OP_ICONST);
4548 ins->type = STACK_I4;
4554 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that constructs a delegate of type KLASS wrapping METHOD, inlining
 * the work normally done by mono_delegate_ctor (): allocate the delegate
 * object, store the target and method fields (with a GC write barrier for the
 * target when write barriers are enabled), optionally cache a per-domain code
 * slot for the compiled method, and store the delegate trampoline (an AOT
 * constant under AOT compilation) into the invoke_impl field.
 *
 * NOTE(review): this chunk of the file is elided — several original lines are
 * missing between the visible statements (declarations, closing braces, the
 * return) — so consult the full file before relying on exact control flow.
 */
4556 static G_GNUC_UNUSED MonoInst*
4557 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4561 gpointer trampoline;
4562 MonoInst *obj, *method_ins, *tramp_ins;
4566 // FIXME reenable optimisation for virtual case
4571 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4574 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate object itself. */
4578 obj = handle_alloc (cfg, klass, FALSE, 0);
4582 /* Inline the contents of mono_delegate_ctor */
4584 /* Set target field */
4585 /* Optimize away setting of NULL target */
4586 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4587 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4588 if (cfg->gen_write_barriers) {
/* The barrier needs the address of the stored field, not the object. */
4589 dreg = alloc_preg (cfg);
4590 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4591 emit_write_barrier (cfg, ptr, target);
4595 /* Set method field */
4596 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4597 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4600 * To avoid looking up the compiled code belonging to the target method
4601 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4602 * store it, and we fill it after the method has been compiled.
4604 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4605 MonoInst *code_slot_ins;
4608 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create / look up the per-domain method -> code-slot hash under the domain lock. */
4610 domain = mono_domain_get ();
4611 mono_domain_lock (domain);
4612 if (!domain_jit_info (domain)->method_code_hash)
4613 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4614 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4616 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4617 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4619 mono_domain_unlock (domain);
4621 if (cfg->compile_aot)
4622 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4624 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4626 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Under AOT, the trampoline is a patch-time constant; otherwise create it now. */
4629 if (cfg->compile_aot) {
4630 MonoDelegateClassMethodPair *del_tramp;
4632 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4633 del_tramp->klass = klass;
4634 del_tramp->method = context_used ? NULL : method;
4635 del_tramp->virtual = virtual;
4636 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4639 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4641 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4642 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4645 /* Set invoke_impl field */
4647 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT path: tramp_ins points at a MonoDelegateTrampInfo; copy its fields. */
4649 dreg = alloc_preg (cfg);
4650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4653 dreg = alloc_preg (cfg);
4654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4658 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall wrapper to allocate a
 * RANK-dimensional array, with the dimension arguments taken from SP.
 * Registers the icall so a wrapper is generated, and marks the cfg as
 * using varargs (mono_array_new_va () has a vararg calling convention),
 * which also forces the LLVM backend off for this method.
 */
4664 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4666 MonoJitICallInfo *info;
4668 /* Need to register the icall so it gets an icall wrapper */
4669 info = mono_get_array_new_va_icall (rank);
4671 cfg->flags |= MONO_CFG_HAS_VARARGS;
4673 /* mono_array_new_va () needs a vararg calling convention */
4674 cfg->disable_llvm = TRUE;
4676 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4677 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method has a GOT variable which has not been materialized yet,
 * insert an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize it, and add a dummy use in the exit block so the variable's
 * liveness spans the whole method (backends may generate uses implicitly).
 */
4681 mono_emit_load_got_addr (MonoCompile *cfg)
4683 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already allocated. */
4685 if (!cfg->got_var || cfg->got_var_allocated)
4688 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4689 getaddr->cil_code = cfg->header->code;
4690 getaddr->dreg = cfg->got_var->dreg;
4692 /* Add it to the start of the first bblock */
4693 if (cfg->bb_entry->code) {
4694 getaddr->next = cfg->bb_entry->code;
4695 cfg->bb_entry->code = getaddr;
4698 MONO_ADD_INS (cfg->bb_entry, getaddr);
4700 cfg->got_var_allocated = TRUE;
4703 * Add a dummy use to keep the got_var alive, since real uses might
4704 * only be generated by the back ends.
4705 * Add it to end_bblock, so the variable's lifetime covers the whole
4707 * It would be better to make the usage of the got var explicit in all
4708 * cases when the backend needs it (i.e. calls, throw etc.), so this
4709 * wouldn't be needed.
4711 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4712 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit: from MONO_INLINELIMIT env var, else INLINE_LENGTH_LIMIT. */
4715 static int inline_limit;
4716 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Rejects inlining for: disabled/shared/generic-sharing compilations, deep
 * inline nesting, noinline/synchronized/MBR methods, bodies over the size limit
 * (unless AggressiveInlining), classes whose cctor cannot be handled, methods
 * with declarative security, and (on soft-float targets) R4 signatures.
 * As a side effect it may create the class vtable and run the cctor for
 * AggressiveInlining methods.
 *
 * NOTE(review): elided chunk — return statements and some declarations
 * (e.g. 'vtable', 'i', and apparently 'signature' used in the
 * MONO_ARCH_HAVE_LMF_OPS branch vs. 'sig' used elsewhere) are on lines
 * not visible here; verify against the full file.
 */
4719 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4721 MonoMethodHeaderSummary header;
4723 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4724 MonoMethodSignature *sig = mono_method_signature (method);
4728 if (cfg->disable_inline)
4730 if (cfg->generic_sharing_context)
4733 if (cfg->inline_depth > 10)
4736 #ifdef MONO_ARCH_HAVE_LMF_OPS
4737 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4738 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4739 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4744 if (!mono_method_get_header_summary (method, &header))
4747 /*runtime, icall and pinvoke are checked by summary call*/
4748 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4749 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4750 (mono_class_is_marshalbyref (method->klass)) ||
4754 /* also consider num_locals? */
4755 /* Do the size check early to avoid creating vtables */
4756 if (!inline_limit_inited) {
4757 if (g_getenv ("MONO_INLINELIMIT"))
4758 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4760 inline_limit = INLINE_LENGTH_LIMIT;
4761 inline_limit_inited = TRUE;
4763 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4767 * if we can initialize the class of the method right away, we do,
4768 * otherwise we don't allow inlining if the class needs initialization,
4769 * since it would mean inserting a call to mono_runtime_class_init()
4770 * inside the inlined code
4772 if (!(cfg->opt & MONO_OPT_SHARED)) {
4773 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4774 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4775 vtable = mono_class_vtable (cfg->domain, method->klass);
4778 if (!cfg->compile_aot)
4779 mono_runtime_class_init (vtable);
4780 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4781 if (cfg->run_cctors && method->klass->has_cctor) {
4782 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4783 if (!method->klass->runtime_info)
4784 /* No vtable created yet */
4786 vtable = mono_class_vtable (cfg->domain, method->klass);
4789 /* This makes so that inline cannot trigger */
4790 /* .cctors: too many apps depend on them */
4791 /* running with a specific order... */
4792 if (! vtable->initialized)
4794 mono_runtime_class_init (vtable);
4796 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4797 if (!method->klass->runtime_info)
4798 /* No vtable created yet */
4800 vtable = mono_class_vtable (cfg->domain, method->klass);
4803 if (!vtable->initialized)
4808 * If we're compiling for shared code
4809 * the cctor will need to be run at aot method load time, for example,
4810 * or at the end of the compilation of the inlining method.
4812 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4817 * CAS - do not inline methods with declarative security
4818 * Note: this has to be before any possible return TRUE;
4820 if (mono_security_method_has_declsec (method))
4823 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods with R4 in the signature. */
4824 if (mono_arch_is_soft_float ()) {
4826 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4828 for (i = 0; i < sig->param_count; ++i)
4829 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4834 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires emitting a call
 * to run KLASS's class constructor first. For non-AOT, an already-initialized
 * vtable means no. BeforeFieldInit classes and accesses from within the class
 * itself get special-cased.
 * NOTE(review): elided chunk — the return statements sit on missing lines.
 */
4841 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4843 if (!cfg->compile_aot) {
4845 if (vtable->initialized)
4849 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4850 if (cfg->method == method)
4854 if (!mono_class_needs_cctor_run (klass, method))
4857 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4858 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element type KLASS, optionally emitting a bounds check
 * (BCHECK). Uses an x86/amd64 LEA fast path for power-of-two element sizes,
 * and an rgctx lookup for the element size under gsharedvt.
 * Returns the address instruction (type STACK_MP).
 */
4865 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4869 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4872 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4875 mono_class_init (klass);
4876 size = mono_class_array_element_size (klass);
4879 mult_reg = alloc_preg (cfg);
4880 array_reg = arr->dreg;
4881 index_reg = index->dreg;
4883 #if SIZEOF_REGISTER == 8
4884 /* The array reg is 64 bits but the index reg is only 32 */
4885 if (COMPILE_LLVM (cfg)) {
4887 index2_reg = index_reg;
4889 index2_reg = alloc_preg (cfg);
4890 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
4893 if (index->type == STACK_I8) {
4894 index2_reg = alloc_preg (cfg);
4895 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4897 index2_reg = index_reg;
4902 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4904 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into an LEA for 1/2/4/8-byte elements. */
4905 if (size == 1 || size == 2 || size == 4 || size == 8) {
4906 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4908 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4909 ins->klass = mono_class_get_element_class (klass);
4910 ins->type = STACK_MP;
4916 add_reg = alloc_ireg_mp (cfg);
4919 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it via rgctx. */
4922 g_assert (cfg->generic_sharing_context);
4923 context_used = mini_class_check_context_used (cfg, klass);
4924 g_assert (context_used);
4925 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4926 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4930 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4931 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4932 ins->klass = mono_class_get_element_class (klass);
4933 ins->type = STACK_MP;
4934 MONO_ADD_INS (cfg->cbb, ins);
4939 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR of element type KLASS. Loads the bounds array,
 * range-checks both indices against lower_bound/length (throwing
 * IndexOutOfRangeException), then computes
 * (realidx1 * dim2_length + realidx2) * element_size + vector offset.
 * Only compiled when the target supports native mul (OP_PMUL).
 */
4941 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4943 int bounds_reg = alloc_preg (cfg);
4944 int add_reg = alloc_ireg_mp (cfg);
4945 int mult_reg = alloc_preg (cfg);
4946 int mult2_reg = alloc_preg (cfg);
4947 int low1_reg = alloc_preg (cfg);
4948 int low2_reg = alloc_preg (cfg);
4949 int high1_reg = alloc_preg (cfg);
4950 int high2_reg = alloc_preg (cfg);
4951 int realidx1_reg = alloc_preg (cfg);
4952 int realidx2_reg = alloc_preg (cfg);
4953 int sum_reg = alloc_preg (cfg);
4954 int index1, index2, tmpreg;
4958 mono_class_init (klass);
4959 size = mono_class_array_element_size (klass);
4961 index1 = index_ins1->dreg;
4962 index2 = index_ins2->dreg;
4964 #if SIZEOF_REGISTER == 8
4965 /* The array reg is 64 bits but the index reg is only 32 */
4966 if (COMPILE_LLVM (cfg)) {
4969 tmpreg = alloc_preg (cfg);
4970 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4972 tmpreg = alloc_preg (cfg);
4973 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4977 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4981 /* range checking */
4982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4983 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; require realidx < length (unsigned). */
4985 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4986 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4987 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4988 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4989 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4990 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4991 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds entry follows the first in memory. */
4993 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4994 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4995 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4996 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4997 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4998 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4999 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector) */
5001 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5002 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5004 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5005 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5007 ins->type = STACK_MP;
5009 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the array Address/Get/Set
 * method CMETHOD. Rank 1 and (on native-mul targets, with MONO_OPT_INTRINS)
 * rank 2 are handled inline; higher ranks fall back to a call to the
 * marshal-generated ElementAddr wrapper.
 */
5016 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5020 MonoMethod *addr_method;
/* A Set method carries the value as its last parameter; exclude it from the rank. */
5023 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5026 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5028 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5029 /* emit_ldelema_2 depends on OP_LMUL */
5030 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5031 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
5035 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5036 addr_method = mono_marshal_get_array_address (rank, element_size);
5037 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break instructions / Debugger.Break (). */
5042 static MonoBreakPolicy
5043 always_insert_breakpoint (MonoMethod *method)
5045 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5048 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5051 * mono_set_break_policy:
5052 * policy_callback: the new callback function
5054 * Allow embedders to decide whether to actually obey breakpoint instructions
5055 * (both break IL instructions and Debugger.Break () method calls), for example
5056 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5057 * untrusted or semi-trusted code.
5059 * @policy_callback will be called every time a break point instruction needs to
5060 * be inserted with the method argument being the method that calls Debugger.Break()
5061 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5062 * if it wants the breakpoint to not be effective in the given method.
5063 * #MONO_BREAK_POLICY_ALWAYS is the default.
5066 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL argument restores the default always-break policy. */
5068 if (policy_callback)
5069 break_policy_func = policy_callback;
5071 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical typo in the name; renaming would
 * break callers elsewhere in this file)
 *
 *   Ask the registered break policy whether a breakpoint should actually be
 * emitted for METHOD. MONO_BREAK_POLICY_ON_DBG is no longer supported and
 * an out-of-range return value is warned about.
 * NOTE(review): elided chunk — the per-case return statements are on missing lines.
 */
5075 should_insert_brekpoint (MonoMethod *method) {
5076 switch (break_policy_func (method)) {
5077 case MONO_BREAK_POLICY_ALWAYS:
5079 case MONO_BREAK_POLICY_NEVER:
5081 case MONO_BREAK_POLICY_ON_DBG:
5082 g_warning ("mdb no longer supported");
5085 g_warning ("Incorrect value returned from break policy callback");
5090 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = value pointer.
 * IS_SET selects store-into-array (with a write barrier for reference
 * element types) vs. load-from-array. Bounds checking is the caller's
 * responsibility, so the element address is computed without a check.
 */
5092 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5094 MonoInst *addr, *store, *load;
5095 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5097 /* the bounds check is already done by the callers */
5098 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5100 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5101 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5102 if (mini_type_is_reference (cfg, fsig->params [2]))
5103 emit_write_barrier (cfg, addr, load);
5105 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5106 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type from the JIT's point of view (gshared-aware). */
5113 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5115 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem-style store: sp [0] = array, sp [1] = index, sp [2] = value.
 * With SAFETY_CHECKS and a reference element type (non-NULL value), this
 * dispatches to the virtual stelemref helper which performs the array
 * covariance check. Otherwise the element address is computed (gsharedvt,
 * constant-index, or general path) and the store emitted directly, with a
 * write barrier for reference element types.
 */
5119 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5121 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5122 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5123 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5124 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5125 MonoInst *iargs [3];
5128 mono_class_setup_vtable (obj_array);
5129 g_assert (helper->slot);
5131 if (sp [0]->type != STACK_OBJ)
5133 if (sp [2]->type != STACK_OBJ)
5140 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5144 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5147 // FIXME-VT: OP_ICONST optimization
5148 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5149 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5150 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset computation into the store. */
5151 } else if (sp [1]->opcode == OP_ICONST) {
5152 int array_reg = sp [0]->dreg;
5153 int index_reg = sp [1]->dreg;
5154 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5157 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5158 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5160 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5161 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5162 if (generic_class_is_reference_type (cfg, klass))
5163 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: store (IS_SET) or load a
 * value at args [1] in array args [0] without the covariance safety check
 * (and, for loads, without an explicit bounds check here).
 */
5170 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element type comes from the value parameter for stores, the return type for loads. */
5175 eklass = mono_class_from_mono_type (fsig->params [2]);
5177 eklass = mono_class_from_mono_type (fsig->ret);
5180 return emit_array_store (cfg, eklass, args, FALSE);
5182 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5183 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Whether a value of PARAM_KLASS can be reinterpreted as RETURN_KLASS by a
 * plain move: both must be reference-free valuetypes of the same size, both
 * struct or both non-struct, and neither R4/R8 (floats live in different
 * registers).
 */
5189 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5193 //Only allow for valuetypes
5194 if (!param_klass->valuetype || !return_klass->valuetype)
5198 if (param_klass->has_references || return_klass->has_references)
5201 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5202 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5203 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5206 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5207 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5210 //And have the same size
5211 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic for Array.UnsafeMov: reinterpret args [0] as the return type
 * when the types (or their element types, for rank-1 arrays) are
 * move-compatible per is_unsafe_mov_compatible ().
 * NOTE(review): elided chunk — the emitted move and the fallback return are
 * on missing lines.
 */
5217 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5219 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5220 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5222 //Valuetypes that are semantically equivalent
5223 if (is_unsafe_mov_compatible (param_klass, return_klass))
5226 //Arrays of valuetypes that are semantically equivalent
5227 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction:
 * SIMD ctor intrinsics (when MONO_OPT_SIMD is on), then native-types
 * intrinsics. Returns the replacement instruction or falls through to
 * the generic path.
 */
5234 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5236 #ifdef MONO_ARCH_SIMD_INTRINSICS
5237 MonoInst *ins = NULL;
5239 if (cfg->opt & MONO_OPT_SIMD) {
5240 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5246 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block.
 */
5250 emit_memory_barrier (MonoCompile *cfg, int kind)
5252 MonoInst *ins = NULL;
5253 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5254 MONO_ADD_INS (cfg->cbb, ins);
5255 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Method-to-intrinsic mapping used when compiling with the LLVM backend:
 * Math.Sin/Cos/Sqrt/Abs(double) become unary float ops, and (with
 * MONO_OPT_CMOV) Math.Min/Max on I4/U4/I8/U8 become min/max ops.
 * NOTE(review): elided chunk — the opcode assignments for several branches
 * and the final return are on missing lines.
 */
5261 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5263 MonoInst *ins = NULL;
5266 /* The LLVM backend supports these intrinsics */
5267 if (cmethod->klass == mono_defaults.math_class) {
5268 if (strcmp (cmethod->name, "Sin") == 0) {
5270 } else if (strcmp (cmethod->name, "Cos") == 0) {
5272 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5274 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5279 MONO_INST_NEW (cfg, ins, opcode);
5280 ins->type = STACK_R8;
5281 ins->dreg = mono_alloc_freg (cfg);
5282 ins->sreg1 = args [0]->dreg;
5283 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only pay off when conditional moves are enabled. */
5287 if (cfg->opt & MONO_OPT_CMOV) {
5288 if (strcmp (cmethod->name, "Min") == 0) {
5289 if (fsig->params [0]->type == MONO_TYPE_I4)
5291 if (fsig->params [0]->type == MONO_TYPE_U4)
5292 opcode = OP_IMIN_UN;
5293 else if (fsig->params [0]->type == MONO_TYPE_I8)
5295 else if (fsig->params [0]->type == MONO_TYPE_U8)
5296 opcode = OP_LMIN_UN;
5297 } else if (strcmp (cmethod->name, "Max") == 0) {
5298 if (fsig->params [0]->type == MONO_TYPE_I4)
5300 if (fsig->params [0]->type == MONO_TYPE_U4)
5301 opcode = OP_IMAX_UN;
5302 else if (fsig->params [0]->type == MONO_TYPE_I8)
5304 else if (fsig->params [0]->type == MONO_TYPE_U8)
5305 opcode = OP_LMAX_UN;
5310 MONO_INST_NEW (cfg, ins, opcode);
5311 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5312 ins->dreg = mono_alloc_ireg (cfg);
5313 ins->sreg1 = args [0]->dreg;
5314 ins->sreg2 = args [1]->dreg;
5315 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore /
 * UnsafeLoad / UnsafeMov internal methods are mapped to direct array IR.
 */
5323 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5325 if (cmethod->klass == mono_defaults.array_class) {
5326 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5327 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5328 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5329 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5330 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5331 return emit_array_unsafe_mov (cfg, fsig, args);
5338 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5340 MonoInst *ins = NULL;
5342 static MonoClass *runtime_helpers_class = NULL;
5343 if (! runtime_helpers_class)
5344 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5345 "System.Runtime.CompilerServices", "RuntimeHelpers");
5347 if (cmethod->klass == mono_defaults.string_class) {
5348 if (strcmp (cmethod->name, "get_Chars") == 0) {
5349 int dreg = alloc_ireg (cfg);
5350 int index_reg = alloc_preg (cfg);
5351 int mult_reg = alloc_preg (cfg);
5352 int add_reg = alloc_preg (cfg);
5354 #if SIZEOF_REGISTER == 8
5355 /* The array reg is 64 bits but the index reg is only 32 */
5356 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5358 index_reg = args [1]->dreg;
5360 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5362 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5363 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5364 add_reg = ins->dreg;
5365 /* Avoid a warning */
5367 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5372 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5373 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5375 type_from_op (ins, NULL, NULL);
5377 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5378 int dreg = alloc_ireg (cfg);
5379 /* Decompose later to allow more optimizations */
5380 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5381 ins->type = STACK_I4;
5382 ins->flags |= MONO_INST_FAULT;
5383 cfg->cbb->has_array_access = TRUE;
5384 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5387 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5388 int mult_reg = alloc_preg (cfg);
5389 int add_reg = alloc_preg (cfg);
5391 /* The corlib functions check for oob already. */
5392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5393 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5395 return cfg->cbb->last_ins;
5398 } else if (cmethod->klass == mono_defaults.object_class) {
5400 if (strcmp (cmethod->name, "GetType") == 0) {
5401 int dreg = alloc_ireg_ref (cfg);
5402 int vt_reg = alloc_preg (cfg);
5403 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5404 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5405 type_from_op (ins, NULL, NULL);
5408 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5409 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5410 int dreg = alloc_ireg (cfg);
5411 int t1 = alloc_ireg (cfg);
5413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5414 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5415 ins->type = STACK_I4;
5419 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5420 MONO_INST_NEW (cfg, ins, OP_NOP);
5421 MONO_ADD_INS (cfg->cbb, ins);
5425 } else if (cmethod->klass == mono_defaults.array_class) {
5426 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5427 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5429 #ifndef MONO_BIG_ARRAYS
5431 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5434 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5435 int dreg = alloc_ireg (cfg);
5436 int bounds_reg = alloc_ireg_mp (cfg);
5437 MonoBasicBlock *end_bb, *szarray_bb;
5438 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5440 NEW_BBLOCK (cfg, end_bb);
5441 NEW_BBLOCK (cfg, szarray_bb);
5443 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5444 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5447 /* Non-szarray case */
5449 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5450 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5452 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5453 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5455 MONO_START_BB (cfg, szarray_bb);
5458 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5459 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5461 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5462 MONO_START_BB (cfg, end_bb);
5464 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5465 ins->type = STACK_I4;
5471 if (cmethod->name [0] != 'g')
5474 if (strcmp (cmethod->name, "get_Rank") == 0) {
5475 int dreg = alloc_ireg (cfg);
5476 int vtable_reg = alloc_preg (cfg);
5477 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5478 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5479 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5480 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5481 type_from_op (ins, NULL, NULL);
5484 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5485 int dreg = alloc_ireg (cfg);
5487 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5488 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5489 type_from_op (ins, NULL, NULL);
5494 } else if (cmethod->klass == runtime_helpers_class) {
5496 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5497 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5501 } else if (cmethod->klass == mono_defaults.thread_class) {
5502 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5503 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5504 MONO_ADD_INS (cfg->cbb, ins);
5506 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5507 return emit_memory_barrier (cfg, FullBarrier);
5509 } else if (cmethod->klass == mono_defaults.monitor_class) {
5511 /* FIXME this should be integrated to the check below once we support the trampoline version */
5512 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5513 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5514 MonoMethod *fast_method = NULL;
5516 /* Avoid infinite recursion */
5517 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5520 fast_method = mono_monitor_get_fast_path (cmethod);
5524 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5528 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5529 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5532 if (COMPILE_LLVM (cfg)) {
5534 * Pass the argument normally, the LLVM backend will handle the
5535 * calling convention problems.
5537 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5539 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5540 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5541 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5542 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5545 return (MonoInst*)call;
5546 } else if (strcmp (cmethod->name, "Exit") == 0) {
5549 if (COMPILE_LLVM (cfg)) {
5550 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5552 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5553 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5554 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5555 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5558 return (MonoInst*)call;
5560 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5562 MonoMethod *fast_method = NULL;
5564 /* Avoid infinite recursion */
5565 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5566 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5567 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5570 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5571 strcmp (cmethod->name, "Exit") == 0)
5572 fast_method = mono_monitor_get_fast_path (cmethod);
5576 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5579 } else if (cmethod->klass->image == mono_defaults.corlib &&
5580 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5581 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5584 #if SIZEOF_REGISTER == 8
5585 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5588 emit_memory_barrier (cfg, FullBarrier);
5590 /* 64 bit reads are already atomic */
5591 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5592 load_ins->dreg = mono_alloc_preg (cfg);
5593 load_ins->inst_basereg = args [0]->dreg;
5594 load_ins->inst_offset = 0;
5595 MONO_ADD_INS (cfg->cbb, load_ins);
5597 emit_memory_barrier (cfg, FullBarrier);
5603 if (strcmp (cmethod->name, "Increment") == 0) {
5604 MonoInst *ins_iconst;
5607 if (fsig->params [0]->type == MONO_TYPE_I4) {
5608 opcode = OP_ATOMIC_ADD_I4;
5609 cfg->has_atomic_add_i4 = TRUE;
5611 #if SIZEOF_REGISTER == 8
5612 else if (fsig->params [0]->type == MONO_TYPE_I8)
5613 opcode = OP_ATOMIC_ADD_I8;
5616 if (!mono_arch_opcode_supported (opcode))
5618 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5619 ins_iconst->inst_c0 = 1;
5620 ins_iconst->dreg = mono_alloc_ireg (cfg);
5621 MONO_ADD_INS (cfg->cbb, ins_iconst);
5623 MONO_INST_NEW (cfg, ins, opcode);
5624 ins->dreg = mono_alloc_ireg (cfg);
5625 ins->inst_basereg = args [0]->dreg;
5626 ins->inst_offset = 0;
5627 ins->sreg2 = ins_iconst->dreg;
5628 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5629 MONO_ADD_INS (cfg->cbb, ins);
5631 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5632 MonoInst *ins_iconst;
5635 if (fsig->params [0]->type == MONO_TYPE_I4) {
5636 opcode = OP_ATOMIC_ADD_I4;
5637 cfg->has_atomic_add_i4 = TRUE;
5639 #if SIZEOF_REGISTER == 8
5640 else if (fsig->params [0]->type == MONO_TYPE_I8)
5641 opcode = OP_ATOMIC_ADD_I8;
5644 if (!mono_arch_opcode_supported (opcode))
5646 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5647 ins_iconst->inst_c0 = -1;
5648 ins_iconst->dreg = mono_alloc_ireg (cfg);
5649 MONO_ADD_INS (cfg->cbb, ins_iconst);
5651 MONO_INST_NEW (cfg, ins, opcode);
5652 ins->dreg = mono_alloc_ireg (cfg);
5653 ins->inst_basereg = args [0]->dreg;
5654 ins->inst_offset = 0;
5655 ins->sreg2 = ins_iconst->dreg;
5656 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5657 MONO_ADD_INS (cfg->cbb, ins);
5659 } else if (strcmp (cmethod->name, "Add") == 0) {
5662 if (fsig->params [0]->type == MONO_TYPE_I4) {
5663 opcode = OP_ATOMIC_ADD_I4;
5664 cfg->has_atomic_add_i4 = TRUE;
5666 #if SIZEOF_REGISTER == 8
5667 else if (fsig->params [0]->type == MONO_TYPE_I8)
5668 opcode = OP_ATOMIC_ADD_I8;
5671 if (!mono_arch_opcode_supported (opcode))
5673 MONO_INST_NEW (cfg, ins, opcode);
5674 ins->dreg = mono_alloc_ireg (cfg);
5675 ins->inst_basereg = args [0]->dreg;
5676 ins->inst_offset = 0;
5677 ins->sreg2 = args [1]->dreg;
5678 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5679 MONO_ADD_INS (cfg->cbb, ins);
5683 if (strcmp (cmethod->name, "Exchange") == 0) {
5685 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5687 if (fsig->params [0]->type == MONO_TYPE_I4) {
5688 opcode = OP_ATOMIC_EXCHANGE_I4;
5689 cfg->has_atomic_exchange_i4 = TRUE;
5691 #if SIZEOF_REGISTER == 8
5692 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5693 (fsig->params [0]->type == MONO_TYPE_I))
5694 opcode = OP_ATOMIC_EXCHANGE_I8;
5696 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5697 opcode = OP_ATOMIC_EXCHANGE_I4;
5698 cfg->has_atomic_exchange_i4 = TRUE;
5704 if (!mono_arch_opcode_supported (opcode))
5707 MONO_INST_NEW (cfg, ins, opcode);
5708 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5709 ins->inst_basereg = args [0]->dreg;
5710 ins->inst_offset = 0;
5711 ins->sreg2 = args [1]->dreg;
5712 MONO_ADD_INS (cfg->cbb, ins);
5714 switch (fsig->params [0]->type) {
5716 ins->type = STACK_I4;
5720 ins->type = STACK_I8;
5722 case MONO_TYPE_OBJECT:
5723 ins->type = STACK_OBJ;
5726 g_assert_not_reached ();
5729 if (cfg->gen_write_barriers && is_ref)
5730 emit_write_barrier (cfg, args [0], args [1]);
5733 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5735 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5736 if (fsig->params [1]->type == MONO_TYPE_I4)
5738 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5739 size = sizeof (gpointer);
5740 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5743 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5745 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5746 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5747 ins->sreg1 = args [0]->dreg;
5748 ins->sreg2 = args [1]->dreg;
5749 ins->sreg3 = args [2]->dreg;
5750 ins->type = STACK_I4;
5751 MONO_ADD_INS (cfg->cbb, ins);
5752 cfg->has_atomic_cas_i4 = TRUE;
5753 } else if (size == 8) {
5754 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5756 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5757 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5758 ins->sreg1 = args [0]->dreg;
5759 ins->sreg2 = args [1]->dreg;
5760 ins->sreg3 = args [2]->dreg;
5761 ins->type = STACK_I8;
5762 MONO_ADD_INS (cfg->cbb, ins);
5764 /* g_assert_not_reached (); */
5766 if (cfg->gen_write_barriers && is_ref)
5767 emit_write_barrier (cfg, args [0], args [1]);
5770 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5771 ins = emit_memory_barrier (cfg, FullBarrier);
5775 } else if (cmethod->klass->image == mono_defaults.corlib) {
5776 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5777 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5778 if (should_insert_brekpoint (cfg->method)) {
5779 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5781 MONO_INST_NEW (cfg, ins, OP_NOP);
5782 MONO_ADD_INS (cfg->cbb, ins);
5786 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5787 && strcmp (cmethod->klass->name, "Environment") == 0) {
5789 EMIT_NEW_ICONST (cfg, ins, 1);
5791 EMIT_NEW_ICONST (cfg, ins, 0);
5795 } else if (cmethod->klass == mono_defaults.math_class) {
5797 * There is general branches code for Min/Max, but it does not work for
5799 * http://everything2.com/?node_id=1051618
5801 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5802 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5804 MonoJumpInfoToken *ji;
5807 cfg->disable_llvm = TRUE;
5809 if (args [0]->opcode == OP_GOT_ENTRY) {
5810 pi = args [0]->inst_p1;
5811 g_assert (pi->opcode == OP_PATCH_INFO);
5812 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5815 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5816 ji = args [0]->inst_p0;
5819 NULLIFY_INS (args [0]);
5822 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5823 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5824 ins->dreg = mono_alloc_ireg (cfg);
5826 ins->inst_p0 = mono_string_to_utf8 (s);
5827 MONO_ADD_INS (cfg->cbb, ins);
5832 #ifdef MONO_ARCH_SIMD_INTRINSICS
5833 if (cfg->opt & MONO_OPT_SIMD) {
5834 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5840 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5844 if (COMPILE_LLVM (cfg)) {
5845 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5850 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect selected well-known calls to cheaper JIT-emitted equivalents.
 * The only redirection visible here is String.InternalAllocateStr, which is
 * replaced by a call to the managed GC string allocator when allocation
 * profiling is off and the method is not compiled in shared (MONO_OPT_SHARED)
 * mode.  Returns the replacement call instruction for that case; the
 * fall-through return path is outside this excerpt.
 */
5854  * This entry point could be used later for arbitrary method
5857 inline static MonoInst*
5858 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5859 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5861 if (method->klass == mono_defaults.string_class) {
5862 /* managed string allocation support */
5863 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5864 MonoInst *iargs [2];
5865 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5866 MonoMethod *managed_alloc = NULL;
5868 g_assert (vtable); /* should not fail since it is System.String */
5869 #ifndef MONO_CROSS_COMPILE
5870 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5874 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5875 iargs [1] = args [0];
5876 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Spill the call arguments in SP into newly created inline-argument
 * variables (cfg->args [i]) so the inlined body can load them like normal
 * arguments.  For an instance call, argument 0's type is taken from the
 * stack entry itself rather than the signature.
 */
5883 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5885 MonoInst *store, *temp;
5888 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5889 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5892 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5893 * would be different than the MonoInst's used to represent arguments, and
5894 * the ldelema implementation can't deal with that.
5895 * Solution: When ldelema is used on an inline argument, create a var for
5896 * it, emit ldelema on that var, and emit the saving code below in
5897 * inline_method () if needed.
5899 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5900 cfg->args [i] = temp;
5901 /* This uses cfg->args [i] which is set by the preceding line */
5902 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5903 store->cil_code = sp [0]->cil_code;
5908 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5909 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5911 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The looked-up value is cached in a function-local
 * static on first use.
 */
5913 check_inline_called_method_name_limit (MonoMethod *called_method)
5916 static const char *limit = NULL;
5918 if (limit == NULL) {
5919 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5921 if (limit_string != NULL)
5922 limit = limit_string;
/* An empty limit means "no restriction" (handled outside this excerpt). */
5927 if (limit [0] != '\0') {
5928 char *called_method_name = mono_method_full_name (called_method, TRUE);
5930 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5931 g_free (called_method_name);
5933 //return (strncmp_result <= 0);
5934 return (strncmp_result == 0);
5941 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining when the *caller's* full name starts
 * with the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ().
 */
5943 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5946 static const char *limit = NULL;
5948 if (limit == NULL) {
5949 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5950 if (limit_string != NULL) {
5951 limit = limit_string;
/* An empty limit means "no restriction" (handled outside this excerpt). */
5957 if (limit [0] != '\0') {
5958 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5960 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5961 g_free (caller_method_name);
5963 //return (strncmp_result <= 0);
5964 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register/variable DREG to the zero value of
 * RTYPE: NULL for reference/pointer types, 0 for integer types, 0.0 for
 * R4/R8 (loaded from a shared static constant), and VZERO for value types
 * (including generic instances and type variables constrained to be VTs).
 */
5972 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5974 static double r8_0 = 0.0;
5978 rtype = mini_replace_type (rtype);
5982 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5983 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5984 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5985 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5986 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5987 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5988 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5989 ins->type = STACK_R8;
5990 ins->inst_p0 = (void*)&r8_0;
5992 MONO_ADD_INS (cfg->cbb, ins);
5993 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5994 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5995 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5996 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5997 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized NULL. */
5999 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations
 * that keep the IR well-formed without generating real code; falls back to
 * emit_init_rvar () for types with no dummy opcode.
 */
6004 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6008 rtype = mini_replace_type (rtype);
6012 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6013 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6014 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6015 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6016 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6017 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6018 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6019 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6020 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6021 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6022 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6023 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6025 emit_init_rvar (cfg, dreg, rtype);
6029 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  Under soft-float a fresh dreg
 * is initialized and then stored into the local so the soft-float pass sees
 * a proper store; otherwise the local's dreg is initialized directly, for
 * real (emit_init_rvar) or as a dummy (emit_dummy_init_rvar) depending on INIT.
 */
6031 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6033 MonoInst *var = cfg->locals [local];
6034 if (COMPILE_SOFT_FLOAT (cfg)) {
6036 int reg = alloc_dreg (cfg, var->type);
6037 emit_init_rvar (cfg, reg, type);
6038 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6041 emit_init_rvar (cfg, var->dreg, type);
6043 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point.  Saves the parts of CFG
 * that mono_method_to_ir () will clobber, compiles the callee's IL into a
 * fresh [sbblock, ebblock] region, restores CFG, and on success (cost below
 * the threshold, or INLINE_ALWAYS) splices the region into the caller's
 * basic-block graph, merging blocks where possible.  On failure the newly
 * created bblocks are abandoned by restoring cfg->cbb.
 *   SP holds the call arguments; RVAR (if the callee is non-void) receives
 * the return value; *OUT_CBB is set to the bblock code emission continues in.
 * Returns the cost of inlining (or a failure indication outside this excerpt).
 */
6050  * Return the cost of inlining CMETHOD.
6053 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6054 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6056 MonoInst *ins, *rvar = NULL;
6057 MonoMethodHeader *cheader;
6058 MonoBasicBlock *ebblock, *sbblock;
6060 MonoMethod *prev_inlined_method;
6061 MonoInst **prev_locals, **prev_args;
6062 MonoType **prev_arg_types;
6063 guint prev_real_offset;
6064 GHashTable *prev_cbb_hash;
6065 MonoBasicBlock **prev_cil_offset_to_bb;
6066 MonoBasicBlock *prev_cbb;
6067 unsigned char* prev_cil_start;
6068 guint32 prev_cil_offset_to_bb_len;
6069 MonoMethod *prev_current_method;
6070 MonoGenericContext *prev_generic_context;
6071 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6073 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6075 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6076 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6079 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6080 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6084 if (cfg->verbose_level > 2)
6085 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6087 if (!cmethod->inline_info) {
6088 cfg->stat_inlineable_methods++;
6089 cmethod->inline_info = 1;
6092 /* allocate local variables */
6093 cheader = mono_method_get_header (cmethod);
6095 if (cheader == NULL || mono_loader_get_last_error ()) {
6096 MonoLoaderError *error = mono_loader_get_last_error ();
6099 mono_metadata_free_mh (cheader);
6100 if (inline_always && error)
6101 mono_cfg_set_exception (cfg, error->exception_type);
6103 mono_loader_clear_error ();
6107 /*Must verify before creating locals as it can cause the JIT to assert.*/
6108 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6109 mono_metadata_free_mh (cheader);
6113 /* allocate space to store the return value */
6114 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6115 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6118 prev_locals = cfg->locals;
6119 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6120 for (i = 0; i < cheader->num_locals; ++i)
6121 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6123 /* allocate start and end blocks */
6124 /* This is needed so if the inline is aborted, we can clean up */
6125 NEW_BBLOCK (cfg, sbblock);
6126 sbblock->real_offset = real_offset;
6128 NEW_BBLOCK (cfg, ebblock);
6129 ebblock->block_num = cfg->num_bblocks++;
6130 ebblock->real_offset = real_offset;
/* Save the per-method compilation state mono_method_to_ir () will overwrite. */
6132 prev_args = cfg->args;
6133 prev_arg_types = cfg->arg_types;
6134 prev_inlined_method = cfg->inlined_method;
6135 cfg->inlined_method = cmethod;
6136 cfg->ret_var_set = FALSE;
6137 cfg->inline_depth ++;
6138 prev_real_offset = cfg->real_offset;
6139 prev_cbb_hash = cfg->cbb_hash;
6140 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6141 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6142 prev_cil_start = cfg->cil_start;
6143 prev_cbb = cfg->cbb;
6144 prev_current_method = cfg->current_method;
6145 prev_generic_context = cfg->generic_context;
6146 prev_ret_var_set = cfg->ret_var_set;
6147 prev_disable_inline = cfg->disable_inline;
6149 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Compile the callee's IL into the [sbblock, ebblock] region. */
6152 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6154 ret_var_set = cfg->ret_var_set;
/* Restore the saved compilation state. */
6156 cfg->inlined_method = prev_inlined_method;
6157 cfg->real_offset = prev_real_offset;
6158 cfg->cbb_hash = prev_cbb_hash;
6159 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6160 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6161 cfg->cil_start = prev_cil_start;
6162 cfg->locals = prev_locals;
6163 cfg->args = prev_args;
6164 cfg->arg_types = prev_arg_types;
6165 cfg->current_method = prev_current_method;
6166 cfg->generic_context = prev_generic_context;
6167 cfg->ret_var_set = prev_ret_var_set;
6168 cfg->disable_inline = prev_disable_inline;
6169 cfg->inline_depth --;
6171 if ((costs >= 0 && costs < 60) || inline_always) {
6172 if (cfg->verbose_level > 2)
6173 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6175 cfg->stat_inlined_methods++;
6177 /* always add some code to avoid block split failures */
6178 MONO_INST_NEW (cfg, ins, OP_NOP);
6179 MONO_ADD_INS (prev_cbb, ins);
6181 prev_cbb->next_bb = sbblock;
6182 link_bblock (cfg, prev_cbb, sbblock);
6185 * Get rid of the begin and end bblocks if possible to aid local
6188 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6190 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6191 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6193 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6194 MonoBasicBlock *prev = ebblock->in_bb [0];
6195 mono_merge_basic_blocks (cfg, prev, ebblock);
6197 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6198 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6199 cfg->cbb = prev_cbb;
6203 * It's possible that the rvar is set in some prev bblock, but not in others.
6209 for (i = 0; i < ebblock->in_count; ++i) {
6210 bb = ebblock->in_bb [i];
6212 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6215 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6223 *out_cbb = cfg->cbb;
6227 * If the inlined method contains only a throw, then the ret var is not
6228 * set, so set it to a dummy value.
6231 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6233 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6236 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard the new bblocks and clear any recorded failure. */
6239 if (cfg->verbose_level > 2)
6240 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6241 cfg->exception_type = MONO_EXCEPTION_NONE;
6242 mono_loader_clear_error ();
6244 /* This gets rid of the newly added bblocks */
6245 cfg->cbb = prev_cbb;
6247 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6252 * Some of these comments may well be out-of-date.
6253 * Design decisions: we do a single pass over the IL code (and we do bblock
6254 * splitting/merging in the few cases when it's required: a back jump to an IL
6255 * address that was not already seen as bblock starting point).
6256 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6257 * Complex operations are decomposed in simpler ones right away. We need to let the
6258 * arch-specific code peek and poke inside this process somehow (except when the
6259 * optimizations can take advantage of the full semantic info of coarse opcodes).
6260 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6261 * MonoInst->opcode initially is the IL opcode or some simplification of that
6262 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6263 * opcode with value bigger than OP_LAST.
6264 * At this point the IR can be handed over to an interpreter, a dumb code generator
6265 * or to the optimizing code generator that will translate it to SSA form.
6267 * Profiling directed optimizations.
6268 * We may compile by default with few or no optimizations and instrument the code
6269 * or the user may indicate what methods to optimize the most either in a config file
6270 * or through repeated runs where the compiler applies offline the optimizations to
6271 * each method and then decides if it was worth it.
6274 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6275 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6276 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6277 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6278 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6279 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6280 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6281 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6283 /* offset from br.s -> br like opcodes */
6284 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
6287 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6289 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6291 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch/switch, using the operand kind of each opcode to find targets.
 * Blocks that can be reached only by falling into a throw are marked
 * out_of_line so they can be moved out of the hot path.
 */
6295 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6297 unsigned char *ip = start;
6298 unsigned char *target;
6301 MonoBasicBlock *bblock;
6302 const MonoOpcode *opcode;
6305 cli_addr = ip - start;
6306 i = mono_opcode_value ((const guint8 **)&ip, end);
6309 opcode = &mono_opcodes [i];
6310 switch (opcode->argument) {
6311 case MonoInlineNone:
6314 case MonoInlineString:
6315 case MonoInlineType:
6316 case MonoInlineField:
6317 case MonoInlineMethod:
6320 case MonoShortInlineR:
6327 case MonoShortInlineVar:
6328 case MonoShortInlineI:
6331 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode. */
6332 target = start + cli_addr + 2 + (signed char)ip [1];
6333 GET_BBLOCK (cfg, bblock, target);
6336 GET_BBLOCK (cfg, bblock, ip);
6338 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte opcode. */
6339 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6340 GET_BBLOCK (cfg, bblock, target);
6343 GET_BBLOCK (cfg, bblock, ip);
6345 case MonoInlineSwitch: {
6346 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the instruction following the jump table. */
6349 cli_addr += 5 + 4 * n;
6350 target = start + cli_addr;
6351 GET_BBLOCK (cfg, bblock, target);
6353 for (j = 0; j < n; ++j) {
6354 target = start + cli_addr + (gint32)read32 (ip);
6355 GET_BBLOCK (cfg, bblock, target);
6365 g_assert_not_reached ();
6368 if (i == CEE_THROW) {
6369 unsigned char *bb_start = ip - 1;
6371 /* Find the start of the bblock containing the throw */
6373 while ((bb_start >= start) && !bblock) {
6374 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6378 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the method is fetched from the wrapper data and inflated with CONTEXT;
 * otherwise it is loaded from metadata.  Open constructed types are allowed
 * (contrast mini_get_method ()).
 */
6388 static inline MonoMethod *
6389 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6393 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6394 method = mono_method_get_wrapper_data (m, token);
6396 method = mono_class_inflate_generic_method (method, context);
6398 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the visible
 * code only shows the check; the rejection action is outside this excerpt).
 */
6404 static inline MonoMethod *
6405 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6407 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6409 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: from wrapper
 * data (inflated with CONTEXT) for wrappers, from metadata otherwise, then
 * run the class initializer.  Lookup errors are currently swallowed (see
 * the FIXME below).
 */
6415 static inline MonoClass*
6416 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6421 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6422 klass = mono_method_get_wrapper_data (method, token);
6424 klass = mono_class_inflate_generic_class (klass, context);
6426 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6427 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6430 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data (inflated
 * with CONTEXT, which is asserted to succeed) for wrappers, otherwise by
 * parsing the signature from metadata.
 */
6434 static inline MonoMethodSignature*
6435 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6437 MonoMethodSignature *fsig;
6439 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6442 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6444 fsig = mono_inflate_generic_signature (fsig, context, &error);
6446 g_assert (mono_error_ok (&error));
6449 fsig = mono_metadata_parse_signature (method->klass->image, token);
6455  * Returns TRUE if the JIT should abort inlining because "callee"
6456  * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands on CALLEE when called from CALLER.  On an
 * ECMA link demand, code throwing a SecurityException is emitted before the
 * call site; other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on
 * the compile unless an earlier exception is already pending.
 */
6459 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6463 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6467 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6468 if (result == MONO_JIT_SECURITY_OK)
6471 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6472 /* Generate code to throw a SecurityException before the actual call/link */
6473 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6476 NEW_ICONST (cfg, args [0], 4);
6477 NEW_METHODCONST (cfg, args [1], caller);
6478 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6479 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6480 /* don't hide previous results */
6481 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6482 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching on first call) the
 * SecurityManager.ThrowException(exception) method used to raise
 * pre-constructed exceptions from JITted code.
 */
6490 throw_exception (void)
6492 static MonoMethod *method = NULL;
6495 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6496 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () that throws the
 * pre-created exception object EX at runtime.
 */
6503 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6505 MonoMethod *thrower = throw_exception ();
6508 EMIT_NEW_PCONST (cfg, args [0], ex);
6509 mono_emit_method_call (cfg, thrower, args, NULL);
6513  * Return the original method if a wrapper is specified. We can only access
6514  * the custom attributes from the original method.
6517 get_original_method (MonoMethod *method)
6519 if (method->wrapper_type == MONO_WRAPPER_NONE)
6522 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6523 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6526 /* in other cases we need to find the original method */
6527 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not access FIELD, emit code throwing the returned exception.
 */
6531 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6532 MonoBasicBlock *bblock, unsigned char *ip)
6534 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6535 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6537 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not call CALLEE, emit code throwing the returned exception.
 */
6541 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6542 MonoBasicBlock *bblock, unsigned char *ip)
6544 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6545 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6547 emit_throw_exception (cfg, ex);
6551  * Check that the IL instructions at ip are the array initialization
6552  * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the dup/ldtoken <field>/call RuntimeHelpers.InitializeArray
 * sequence following a newarr and, when it matches an array of a supported
 * element type, return a pointer to the static initializer data (or, for AOT
 * with a non-dynamic image, the field RVA via GUINT_TO_POINTER) and fill in
 * *out_size / *out_field_token.  Big-endian multi-byte element types are
 * rejected since the stored data would need byte swapping.
 */
6555 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6558  * newarr[System.Int32]
6560  * ldtoken field valuetype ...
6561  * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6563 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6564 guint32 token = read32 (ip + 7);
6565 guint32 field_token = read32 (ip + 2);
6566 guint32 field_index = field_token & 0xffffff;
6568 const char *data_ptr;
6570 MonoMethod *cmethod;
6571 MonoClass *dummy_class;
6572 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6578 *out_field_token = field_token;
6580 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
6583 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6585 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6586 case MONO_TYPE_BOOLEAN:
6590 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6591 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6592 case MONO_TYPE_CHAR:
/* Sanity check: the computed blob size must fit in the backing field. */
6609 if (size > mono_type_size (field->type, &dummy_align))
6612 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6613 if (!image_is_dynamic (method->klass->image)) {
6614 field_index = read32 (ip + 2) & 0xffffff;
6615 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6616 data_ptr = mono_image_rva_map (method->klass->image, rva);
6617 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6618 /* for aot code we do the lookup on load */
6619 if (aot && data_ptr)
6620 return GUINT_TO_POINTER (rva);
6622 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6624 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP (or noting an empty
 * body).  The header is queued on headers_to_free rather than freed here.
 */
6632 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6634 char *method_fname = mono_method_full_name (method, TRUE);
6636 MonoMethodHeader *header = mono_method_get_header (method);
6638 if (header->code_size == 0)
6639 method_code = g_strdup ("method body is empty.");
6641 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6642 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6643 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6644 g_free (method_fname);
6645 g_free (method_code);
6646 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-created exception object on CFG, registering
 * cfg->exception_ptr as a GC root so the object survives until rethrown.
 */
6650 set_exception_object (MonoCompile *cfg, MonoException *exception)
6652 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6653 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6654 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into IL local N.  When the value on the stack is a
 * freshly emitted constant and the store would be a plain register move,
 * the move is optimized away by retargeting the constant's dreg to the
 * local's dreg; otherwise a normal LOCSTORE is emitted.
 */
6658 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6661 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6662 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6663 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6664 /* Optimize reg-reg moves away */
6666 * Can't optimize other opcodes, since sp[0] might point to
6667 * the last ins of a decomposed opcode.
6669 sp [0]->dreg = (cfg)->locals [n]->dreg;
6671 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6676 * ldloca inhibits many optimizations so try to get rid of it in common
6679 static inline unsigned char *
/*
 * Recognize a "ldloca <n>; initobj <token>" pair and replace it with a direct
 * initialization of the local, so no address of the local is ever taken.
 * SIZE selects the operand width of the ldloca form being decoded.
 * Returns the IP to continue at when the pattern was matched (the non-matching
 * path is elided from this view).
 */
6680 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Local index from the ldloca operand (wide form; short form handling elided). */
6690 		local = read16 (ip + 2);
/* NOTE(review): IP appears to have been advanced past the ldloca before this
 * check — ip [0]/ip [1] here are the FOLLOWING instruction; confirm against
 * the elided lines. The initobj must stay in the same basic block. */
6694 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6695 		/* From the INITOBJ case */
6696 		token = read32 (ip + 2);
6697 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6698 		CHECK_TYPELOAD (klass);
6699 		type = mini_replace_type (&klass->byval_arg);
/* Zero-initialize the local in place instead of ldloca + initobj. */
6700 		emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by walking
 * the ->parent chain (the loop structure is partly elided from this view).
 */
6708 is_exception_class (MonoClass *class)
6711 		if (class == mono_defaults.exception_class)
6713 		class = class->parent;
6719 * is_jit_optimizer_disabled:
6721 * Determine whenever M's assembly has a DebuggableAttribute with the
6722 * IsJITOptimizerDisabled flag set.
6725 is_jit_optimizer_disabled (MonoMethod *m)
6727 	MonoAssembly *ass = m->klass->image->assembly;
6728 	MonoCustomAttrInfo* attrs;
/* Lazily-resolved System.Diagnostics.DebuggableAttribute class (cached). */
6729 	static MonoClass *klass;
6731 	gboolean val = FALSE;
/* Fast path: the result is cached per-assembly once computed. */
6734 	if (ass->jit_optimizer_disabled_inited)
6735 		return ass->jit_optimizer_disabled;
6738 		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the (default FALSE) result before the inited flag; the barrier
 * orders the two stores for concurrent readers of the fast path above. */
6741 		ass->jit_optimizer_disabled = FALSE;
6742 		mono_memory_barrier ();
6743 		ass->jit_optimizer_disabled_inited = TRUE;
6747 	attrs = mono_custom_attrs_from_assembly (ass);
6749 		for (i = 0; i < attrs->num_attrs; ++i) {
6750 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
6753 			MonoMethodSignature *sig;
/* Only DebuggableAttribute instances are of interest. */
6755 			if (!attr->ctor || attr->ctor->klass != klass)
6757 			/* Decode the attribute. See reflection.c */
6758 			len = attr->data_size;
6759 			p = (const char*)attr->data;
/* Custom attribute blob prolog must be 0x0001 (ECMA-335 II.23.3). */
6761 			g_assert (read16 (p) == 0x0001);
6763 			// FIXME: Support named parameters
6764 			sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded. */
6765 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6767 			/* Two boolean arguments */
6771 		mono_custom_attrs_free (attrs);
/* Same publish order as above: value, barrier, then the inited flag. */
6774 	ass->jit_optimizer_disabled = val;
6775 	mono_memory_barrier ();
6776 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call CMETHOD/FSIG made from METHOD with CALL_OPCODE
 * can be compiled as a real tail call. Starts from the arch-level check and
 * then vetoes any case where the callee could observe the caller's stack
 * frame after it has been torn down.
 */
6782 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6784 	gboolean supported_tail_call;
6787 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6788 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback when the arch has no OP_TAIL_CALL: require identical signatures
 * and a non-struct return. */
6790 	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6793 	for (i = 0; i < fsig->param_count; ++i) {
6794 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6795 			/* These can point to the current method's stack */
6796 			supported_tail_call = FALSE;
6798 	if (fsig->hasthis && cmethod->klass->valuetype)
6799 		/* this might point to the current method's stack */
6800 		supported_tail_call = FALSE;
6801 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6802 		supported_tail_call = FALSE;
/* A saved LMF in the caller pins its frame, so it cannot be replaced. */
6803 	if (cfg->method->save_lmf)
6804 		supported_tail_call = FALSE;
6805 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6806 		supported_tail_call = FALSE;
/* Only plain CEE_CALL sites qualify (not callvirt/calli). */
6807 	if (call_opcode != CEE_CALL)
6808 		supported_tail_call = FALSE;
6810 	/* Debugging support */
6812 	if (supported_tail_call) {
/* mono_debug_count () lets tail calls be bisected away when debugging. */
6813 		if (!mono_debug_count ())
6814 			supported_tail_call = FALSE;
6818 	return supported_tail_call;
6821 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6822  * it to the thread local value based on the tls_offset field. Every other kind of access to
6823  * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is the "tlsdata" field of corlib's ThreadLocal`1.
 * Note strcmp () returns non-zero on mismatch, so each test below rejects the
 * field early (the early returns are elided from this view).
 */
6826 is_magic_tls_access (MonoClassField *field)
6828 	if (strcmp (field->name, "tlsdata"))
6830 	if (strcmp (field->parent->name, "ThreadLocal`1"))
/* Only the corlib ThreadLocal`1 is magic, not a user type with the same name. */
6832 	return field->parent->image == mono_defaults.corlib;
6835 /* emits the code needed to access a managed tls var (like ThreadStatic)
6836  * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6837  * pointer for the current thread.
6838  * Returns the MonoInst* representing the address of the tls var.
6841 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6844 	int static_data_reg, array_reg, dreg;
6845 	int offset2_reg, idx_reg;
6846 	// inlined access to the tls data
6847 	// idx = (offset >> 24) - 1;
6848 	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* Load thread->static_data (array of per-index data blocks). */
6849 	static_data_reg = alloc_ireg (cfg);
6850 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scale by pointer size (shift by 3 or 2). */
6851 	idx_reg = alloc_ireg (cfg);
6852 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6853 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6854 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6855 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* Load static_data [idx]. */
6856 	array_reg = alloc_ireg (cfg);
6857 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Low 24 bits of the offset are the byte offset inside that block. */
6858 	offset2_reg = alloc_ireg (cfg);
6859 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6860 	dreg = alloc_ireg (cfg);
6861 	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6866 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6867 * this address is cached per-method in cached_tls_addr.
6870 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6872 	MonoInst *load, *addr, *temp, *store, *thread_ins;
6873 	MonoClassField *offset_field;
/* Reuse the per-method cached address when it was already computed. */
6875 	if (*cached_tls_addr) {
6876 		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6879 	thread_ins = mono_get_thread_intrinsic (cfg);
6880 	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset field out of the ThreadLocal<T> instance. */
6882 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6884 		MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: fall back to a managed icall to get
 * the current MonoInternalThread. */
6886 		MonoMethod *thread_method;
6887 		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6888 		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6890 	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6891 	addr->klass = mono_class_from_mono_type (tls_field->type);
6892 	addr->type = STACK_MP;
/* Cache the computed address in a temporary for later uses in this method. */
6893 	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6894 	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6896 	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6903 * Handle calls made to ctors from NEWOBJ opcodes.
6905 * REF_BBLOCK will point to the current bblock after the call.
6908 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6909 				  MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
6911 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
6912 	MonoBasicBlock *bblock = *ref_bblock;
/* Generic-shared valuetype ctors need an extra rgctx/vtable argument. */
6914 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6915 			mono_method_is_generic_sharable (cmethod, TRUE)) {
6916 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
6917 			mono_class_vtable (cfg->domain, cmethod->klass);
6918 			CHECK_TYPELOAD (cmethod->klass);
/* Method-inflated case: pass the method's own rgctx. */
6920 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
6921 				cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Shared-context case: fetch the vtable through the rgctx. */
6924 				vtable_arg = emit_get_rgctx_klass (cfg, context_used,
6925 					cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: the vtable is known at compile time. */
6927 				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6929 					CHECK_TYPELOAD (cmethod->klass);
6930 				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6935 	/* Avoid virtual calls to ctors if possible */
6936 	if (mono_class_is_marshalbyref (cmethod->klass))
6937 		callvirt_this_arg = sp [0];
/* 1) Intrinsic ctor implementations (must return void). */
6939 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
6940 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
6941 		CHECK_CFG_EXCEPTION;
/* 2) Try inlining the ctor; exception-class ctors are never inlined. */
6942 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
6943 			   mono_method_check_inlining (cfg, cmethod) &&
6944 			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
6947 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* Account the 5 bytes of the call site against the inline cost. */
6948 			cfg->real_offset += 5;
6950 			*inline_costs += costs - 5;
6951 			*ref_bblock = bblock;
6953 			INLINE_FAILURE ("inline failure");
6954 			// FIXME-VT: Clean this up
6955 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
6956 				GSHAREDVT_FAILURE(*ip);
6957 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signatures go through an out trampoline via calli. */
6959 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
6962 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
6963 		mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Shared-context call that cannot be patched: call indirectly through
 * an address obtained from the rgctx. */
6964 	} else if (context_used &&
6965 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
6966 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
6967 		MonoInst *cmethod_addr;
6969 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
6971 		cmethod_addr = emit_get_rgctx_method (cfg, context_used,
6972 											  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6974 		mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Fallback: a plain direct (or callvirt-style) ctor call. */
6976 		INLINE_FAILURE ("ctor call");
6977 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
6978 										  callvirt_this_arg, NULL, vtable_arg);
6985 * mono_method_to_ir:
6987 * Translate the .net IL into linear IR.
6990 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6991 MonoInst *return_var, MonoInst **inline_args,
6992 guint inline_offset, gboolean is_virtual_call)
6995 MonoInst *ins, **sp, **stack_start;
6996 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6997 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6998 MonoMethod *cmethod, *method_definition;
6999 MonoInst **arg_array;
7000 MonoMethodHeader *header;
7002 guint32 token, ins_flag;
7004 MonoClass *constrained_call = NULL;
7005 unsigned char *ip, *end, *target, *err_pos;
7006 MonoMethodSignature *sig;
7007 MonoGenericContext *generic_context = NULL;
7008 MonoGenericContainer *generic_container = NULL;
7009 MonoType **param_types;
7010 int i, n, start_new_bblock, dreg;
7011 int num_calls = 0, inline_costs = 0;
7012 int breakpoint_id = 0;
7014 MonoBoolean security, pinvoke;
7015 MonoSecurityManager* secman = NULL;
7016 MonoDeclSecurityActions actions;
7017 GSList *class_inits = NULL;
7018 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7020 gboolean init_locals, seq_points, skip_dead_blocks;
7021 gboolean sym_seq_points = FALSE;
7022 MonoInst *cached_tls_addr = NULL;
7023 MonoDebugMethodInfo *minfo;
7024 MonoBitSet *seq_point_locs = NULL;
7025 MonoBitSet *seq_point_set_locs = NULL;
7027 cfg->disable_inline = is_jit_optimizer_disabled (method);
7029 /* serialization and xdomain stuff may need access to private fields and methods */
7030 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7031 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7032 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7033 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7034 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7035 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7037 dont_verify |= mono_security_smcs_hack_enabled ();
7039 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7040 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7041 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7042 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7043 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7045 image = method->klass->image;
7046 header = mono_method_get_header (method);
7048 MonoLoaderError *error;
7050 if ((error = mono_loader_get_last_error ())) {
7051 mono_cfg_set_exception (cfg, error->exception_type);
7053 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7054 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7056 goto exception_exit;
7058 generic_container = mono_method_get_generic_container (method);
7059 sig = mono_method_signature (method);
7060 num_args = sig->hasthis + sig->param_count;
7061 ip = (unsigned char*)header->code;
7062 cfg->cil_start = ip;
7063 end = ip + header->code_size;
7064 cfg->stat_cil_code_size += header->code_size;
7066 seq_points = cfg->gen_seq_points && cfg->method == method;
7067 #ifdef PLATFORM_ANDROID
7068 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7071 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7072 /* We could hit a seq point before attaching to the JIT (#8338) */
7076 if (cfg->gen_seq_points && cfg->method == method) {
7077 minfo = mono_debug_lookup_method (method);
7079 int i, n_il_offsets;
7083 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7084 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7085 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7086 sym_seq_points = TRUE;
7087 for (i = 0; i < n_il_offsets; ++i) {
7088 if (il_offsets [i] < header->code_size)
7089 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7091 g_free (il_offsets);
7092 g_free (line_numbers);
7097 * Methods without init_locals set could cause asserts in various passes
7098 * (#497220). To work around this, we emit dummy initialization opcodes
7099 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7100 * on some platforms.
7102 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7103 init_locals = header->init_locals;
7107 method_definition = method;
7108 while (method_definition->is_inflated) {
7109 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7110 method_definition = imethod->declaring;
7113 /* SkipVerification is not allowed if core-clr is enabled */
7114 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7116 dont_verify_stloc = TRUE;
7119 if (sig->is_inflated)
7120 generic_context = mono_method_get_context (method);
7121 else if (generic_container)
7122 generic_context = &generic_container->context;
7123 cfg->generic_context = generic_context;
7125 if (!cfg->generic_sharing_context)
7126 g_assert (!sig->has_type_parameters);
7128 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7129 g_assert (method->is_inflated);
7130 g_assert (mono_method_get_context (method)->method_inst);
7132 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7133 g_assert (sig->generic_param_count);
7135 if (cfg->method == method) {
7136 cfg->real_offset = 0;
7138 cfg->real_offset = inline_offset;
7141 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7142 cfg->cil_offset_to_bb_len = header->code_size;
7144 cfg->current_method = method;
7146 if (cfg->verbose_level > 2)
7147 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7149 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7151 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7152 for (n = 0; n < sig->param_count; ++n)
7153 param_types [n + sig->hasthis] = sig->params [n];
7154 cfg->arg_types = param_types;
7156 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7157 if (cfg->method == method) {
7159 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7160 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7163 NEW_BBLOCK (cfg, start_bblock);
7164 cfg->bb_entry = start_bblock;
7165 start_bblock->cil_code = NULL;
7166 start_bblock->cil_length = 0;
7167 #if defined(__native_client_codegen__)
7168 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7169 ins->dreg = alloc_dreg (cfg, STACK_I4);
7170 MONO_ADD_INS (start_bblock, ins);
7174 NEW_BBLOCK (cfg, end_bblock);
7175 cfg->bb_exit = end_bblock;
7176 end_bblock->cil_code = NULL;
7177 end_bblock->cil_length = 0;
7178 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7179 g_assert (cfg->num_bblocks == 2);
7181 arg_array = cfg->args;
7183 if (header->num_clauses) {
7184 cfg->spvars = g_hash_table_new (NULL, NULL);
7185 cfg->exvars = g_hash_table_new (NULL, NULL);
7187 /* handle exception clauses */
7188 for (i = 0; i < header->num_clauses; ++i) {
7189 MonoBasicBlock *try_bb;
7190 MonoExceptionClause *clause = &header->clauses [i];
7191 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7192 try_bb->real_offset = clause->try_offset;
7193 try_bb->try_start = TRUE;
7194 try_bb->region = ((i + 1) << 8) | clause->flags;
7195 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7196 tblock->real_offset = clause->handler_offset;
7197 tblock->flags |= BB_EXCEPTION_HANDLER;
7200 * Linking the try block with the EH block hinders inlining as we won't be able to
7201 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7203 if (COMPILE_LLVM (cfg))
7204 link_bblock (cfg, try_bb, tblock);
7206 if (*(ip + clause->handler_offset) == CEE_POP)
7207 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7209 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7210 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7211 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7212 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7213 MONO_ADD_INS (tblock, ins);
7215 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7216 /* finally clauses already have a seq point */
7217 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7218 MONO_ADD_INS (tblock, ins);
7221 /* todo: is a fault block unsafe to optimize? */
7222 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7223 tblock->flags |= BB_EXCEPTION_UNSAFE;
7227 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7229 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7231 /* catch and filter blocks get the exception object on the stack */
7232 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7233 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7234 MonoInst *dummy_use;
7236 /* mostly like handle_stack_args (), but just sets the input args */
7237 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7238 tblock->in_scount = 1;
7239 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7240 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7243 * Add a dummy use for the exvar so its liveness info will be
7247 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7249 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7250 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7251 tblock->flags |= BB_EXCEPTION_HANDLER;
7252 tblock->real_offset = clause->data.filter_offset;
7253 tblock->in_scount = 1;
7254 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7255 /* The filter block shares the exvar with the handler block */
7256 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7257 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7258 MONO_ADD_INS (tblock, ins);
7262 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7263 clause->data.catch_class &&
7264 cfg->generic_sharing_context &&
7265 mono_class_check_context_used (clause->data.catch_class)) {
7267 * In shared generic code with catch
7268 * clauses containing type variables
7269 * the exception handling code has to
7270 * be able to get to the rgctx.
7271 * Therefore we have to make sure that
7272 * the vtable/mrgctx argument (for
7273 * static or generic methods) or the
7274 * "this" argument (for non-static
7275 * methods) are live.
7277 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7278 mini_method_get_context (method)->method_inst ||
7279 method->klass->valuetype) {
7280 mono_get_vtable_var (cfg);
7282 MonoInst *dummy_use;
7284 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7289 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7290 cfg->cbb = start_bblock;
7291 cfg->args = arg_array;
7292 mono_save_args (cfg, sig, inline_args);
7295 /* FIRST CODE BLOCK */
7296 NEW_BBLOCK (cfg, bblock);
7297 bblock->cil_code = ip;
7301 ADD_BBLOCK (cfg, bblock);
7303 if (cfg->method == method) {
7304 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7305 if (breakpoint_id) {
7306 MONO_INST_NEW (cfg, ins, OP_BREAK);
7307 MONO_ADD_INS (bblock, ins);
7311 if (mono_security_cas_enabled ())
7312 secman = mono_security_manager_get_methods ();
7314 security = (secman && mono_security_method_has_declsec (method));
7315 /* at this point having security doesn't mean we have any code to generate */
7316 if (security && (cfg->method == method)) {
7317 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7318 * And we do not want to enter the next section (with allocation) if we
7319 * have nothing to generate */
7320 security = mono_declsec_get_demands (method, &actions);
7323 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7324 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7326 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7327 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7328 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7330 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7331 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7335 mono_custom_attrs_free (custom);
7338 custom = mono_custom_attrs_from_class (wrapped->klass);
7339 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7343 mono_custom_attrs_free (custom);
7346 /* not a P/Invoke after all */
7351 /* we use a separate basic block for the initialization code */
7352 NEW_BBLOCK (cfg, init_localsbb);
7353 cfg->bb_init = init_localsbb;
7354 init_localsbb->real_offset = cfg->real_offset;
7355 start_bblock->next_bb = init_localsbb;
7356 init_localsbb->next_bb = bblock;
7357 link_bblock (cfg, start_bblock, init_localsbb);
7358 link_bblock (cfg, init_localsbb, bblock);
7360 cfg->cbb = init_localsbb;
7362 if (cfg->gsharedvt && cfg->method == method) {
7363 MonoGSharedVtMethodInfo *info;
7364 MonoInst *var, *locals_var;
7367 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7368 info->method = cfg->method;
7369 info->count_entries = 16;
7370 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7371 cfg->gsharedvt_info = info;
7373 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7374 /* prevent it from being register allocated */
7375 //var->flags |= MONO_INST_VOLATILE;
7376 cfg->gsharedvt_info_var = var;
7378 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7379 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7381 /* Allocate locals */
7382 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7383 /* prevent it from being register allocated */
7384 //locals_var->flags |= MONO_INST_VOLATILE;
7385 cfg->gsharedvt_locals_var = locals_var;
7387 dreg = alloc_ireg (cfg);
7388 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7390 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7391 ins->dreg = locals_var->dreg;
7393 MONO_ADD_INS (cfg->cbb, ins);
7394 cfg->gsharedvt_locals_var_ins = ins;
7396 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7399 ins->flags |= MONO_INST_INIT;
7403 /* at this point we know, if security is TRUE, that some code needs to be generated */
7404 if (security && (cfg->method == method)) {
7407 cfg->stat_cas_demand_generation++;
7409 if (actions.demand.blob) {
7410 /* Add code for SecurityAction.Demand */
7411 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7412 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7413 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7414 mono_emit_method_call (cfg, secman->demand, args, NULL);
7416 if (actions.noncasdemand.blob) {
7417 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7418 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7419 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7420 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7421 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7422 mono_emit_method_call (cfg, secman->demand, args, NULL);
7424 if (actions.demandchoice.blob) {
7425 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7426 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7427 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7428 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7429 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7433 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7435 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7438 if (mono_security_core_clr_enabled ()) {
7439 /* check if this is native code, e.g. an icall or a p/invoke */
7440 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7441 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7443 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7444 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7446 /* if this ia a native call then it can only be JITted from platform code */
7447 if ((icall || pinvk) && method->klass && method->klass->image) {
7448 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7449 MonoException *ex = icall ? mono_get_exception_security () :
7450 mono_get_exception_method_access ();
7451 emit_throw_exception (cfg, ex);
7458 CHECK_CFG_EXCEPTION;
7460 if (header->code_size == 0)
7463 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7468 if (cfg->method == method)
7469 mono_debug_init_method (cfg, bblock, breakpoint_id);
7471 for (n = 0; n < header->num_locals; ++n) {
7472 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7477 /* We force the vtable variable here for all shared methods
7478 for the possibility that they might show up in a stack
7479 trace where their exact instantiation is needed. */
7480 if (cfg->generic_sharing_context && method == cfg->method) {
7481 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7482 mini_method_get_context (method)->method_inst ||
7483 method->klass->valuetype) {
7484 mono_get_vtable_var (cfg);
7486 /* FIXME: Is there a better way to do this?
7487 We need the variable live for the duration
7488 of the whole method. */
7489 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7493 /* add a check for this != NULL to inlined methods */
7494 if (is_virtual_call) {
7497 NEW_ARGLOAD (cfg, arg_ins, 0);
7498 MONO_ADD_INS (cfg->cbb, arg_ins);
7499 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7502 skip_dead_blocks = !dont_verify;
7503 if (skip_dead_blocks) {
7504 original_bb = bb = mono_basic_block_split (method, &error);
7505 if (!mono_error_ok (&error)) {
7506 mono_error_cleanup (&error);
7512 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7513 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7516 start_new_bblock = 0;
7519 if (cfg->method == method)
7520 cfg->real_offset = ip - header->code;
7522 cfg->real_offset = inline_offset;
7527 if (start_new_bblock) {
7528 bblock->cil_length = ip - bblock->cil_code;
7529 if (start_new_bblock == 2) {
7530 g_assert (ip == tblock->cil_code);
7532 GET_BBLOCK (cfg, tblock, ip);
7534 bblock->next_bb = tblock;
7537 start_new_bblock = 0;
7538 for (i = 0; i < bblock->in_scount; ++i) {
7539 if (cfg->verbose_level > 3)
7540 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7541 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7545 g_slist_free (class_inits);
7548 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7549 link_bblock (cfg, bblock, tblock);
7550 if (sp != stack_start) {
7551 handle_stack_args (cfg, stack_start, sp - stack_start);
7553 CHECK_UNVERIFIABLE (cfg);
7555 bblock->next_bb = tblock;
7558 for (i = 0; i < bblock->in_scount; ++i) {
7559 if (cfg->verbose_level > 3)
7560 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7561 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7564 g_slist_free (class_inits);
7569 if (skip_dead_blocks) {
7570 int ip_offset = ip - header->code;
7572 if (ip_offset == bb->end)
7576 int op_size = mono_opcode_size (ip, end);
7577 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7579 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7581 if (ip_offset + op_size == bb->end) {
7582 MONO_INST_NEW (cfg, ins, OP_NOP);
7583 MONO_ADD_INS (bblock, ins);
7584 start_new_bblock = 1;
7592 * Sequence points are points where the debugger can place a breakpoint.
7593 * Currently, we generate these automatically at points where the IL
7596 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7598 * Make methods interruptable at the beginning, and at the targets of
7599 * backward branches.
7600 * Also, do this at the start of every bblock in methods with clauses too,
7601 * to be able to handle instructions with inprecise control flow like
7603 * Backward branches are handled at the end of method-to-ir ().
7605 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7607 /* Avoid sequence points on empty IL like .volatile */
7608 // FIXME: Enable this
7609 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7610 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7611 if (sp != stack_start)
7612 ins->flags |= MONO_INST_NONEMPTY_STACK;
7613 MONO_ADD_INS (cfg->cbb, ins);
7616 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7619 bblock->real_offset = cfg->real_offset;
7621 if ((cfg->method == method) && cfg->coverage_info) {
7622 guint32 cil_offset = ip - header->code;
7623 cfg->coverage_info->data [cil_offset].cil_code = ip;
7625 /* TODO: Use an increment here */
7626 #if defined(TARGET_X86)
7627 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7628 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7630 MONO_ADD_INS (cfg->cbb, ins);
7632 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7637 if (cfg->verbose_level > 3)
7638 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7642 if (seq_points && !sym_seq_points && sp != stack_start) {
7644 * The C# compiler uses these nops to notify the JIT that it should
7645 * insert seq points.
7647 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7648 MONO_ADD_INS (cfg->cbb, ins);
7650 if (cfg->keep_cil_nops)
7651 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7653 MONO_INST_NEW (cfg, ins, OP_NOP);
7655 MONO_ADD_INS (bblock, ins);
7658 if (should_insert_brekpoint (cfg->method)) {
7659 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7661 MONO_INST_NEW (cfg, ins, OP_NOP);
7664 MONO_ADD_INS (bblock, ins);
7670 CHECK_STACK_OVF (1);
7671 n = (*ip)-CEE_LDARG_0;
7673 EMIT_NEW_ARGLOAD (cfg, ins, n);
7681 CHECK_STACK_OVF (1);
7682 n = (*ip)-CEE_LDLOC_0;
7684 EMIT_NEW_LOCLOAD (cfg, ins, n);
7693 n = (*ip)-CEE_STLOC_0;
7696 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7698 emit_stloc_ir (cfg, sp, header, n);
7705 CHECK_STACK_OVF (1);
7708 EMIT_NEW_ARGLOAD (cfg, ins, n);
7714 CHECK_STACK_OVF (1);
7717 NEW_ARGLOADA (cfg, ins, n);
7718 MONO_ADD_INS (cfg->cbb, ins);
7728 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7730 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7735 CHECK_STACK_OVF (1);
7738 EMIT_NEW_LOCLOAD (cfg, ins, n);
7742 case CEE_LDLOCA_S: {
7743 unsigned char *tmp_ip;
7745 CHECK_STACK_OVF (1);
7746 CHECK_LOCAL (ip [1]);
7748 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7754 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7763 CHECK_LOCAL (ip [1]);
7764 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7766 emit_stloc_ir (cfg, sp, header, ip [1]);
7771 CHECK_STACK_OVF (1);
7772 EMIT_NEW_PCONST (cfg, ins, NULL);
7773 ins->type = STACK_OBJ;
7778 CHECK_STACK_OVF (1);
7779 EMIT_NEW_ICONST (cfg, ins, -1);
7792 CHECK_STACK_OVF (1);
7793 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7799 CHECK_STACK_OVF (1);
7801 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7807 CHECK_STACK_OVF (1);
7808 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7814 CHECK_STACK_OVF (1);
7815 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7816 ins->type = STACK_I8;
7817 ins->dreg = alloc_dreg (cfg, STACK_I8);
7819 ins->inst_l = (gint64)read64 (ip);
7820 MONO_ADD_INS (bblock, ins);
7826 gboolean use_aotconst = FALSE;
7828 #ifdef TARGET_POWERPC
7829 /* FIXME: Clean this up */
7830 if (cfg->compile_aot)
7831 use_aotconst = TRUE;
7834 /* FIXME: we should really allocate this only late in the compilation process */
7835 f = mono_domain_alloc (cfg->domain, sizeof (float));
7837 CHECK_STACK_OVF (1);
7843 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7845 dreg = alloc_freg (cfg);
7846 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7847 ins->type = STACK_R8;
7849 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7850 ins->type = STACK_R8;
7851 ins->dreg = alloc_dreg (cfg, STACK_R8);
7853 MONO_ADD_INS (bblock, ins);
7863 gboolean use_aotconst = FALSE;
7865 #ifdef TARGET_POWERPC
7866 /* FIXME: Clean this up */
7867 if (cfg->compile_aot)
7868 use_aotconst = TRUE;
7871 /* FIXME: we should really allocate this only late in the compilation process */
7872 d = mono_domain_alloc (cfg->domain, sizeof (double));
7874 CHECK_STACK_OVF (1);
7880 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7882 dreg = alloc_freg (cfg);
7883 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7884 ins->type = STACK_R8;
7886 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7887 ins->type = STACK_R8;
7888 ins->dreg = alloc_dreg (cfg, STACK_R8);
7890 MONO_ADD_INS (bblock, ins);
7899 MonoInst *temp, *store;
7901 CHECK_STACK_OVF (1);
7905 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7906 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7908 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7911 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7924 if (sp [0]->type == STACK_R8)
7925 /* we need to pop the value from the x86 FP stack */
7926 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7932 INLINE_FAILURE ("jmp");
7933 GSHAREDVT_FAILURE (*ip);
7936 if (stack_start != sp)
7938 token = read32 (ip + 1);
7939 /* FIXME: check the signature matches */
7940 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7942 if (!cmethod || mono_loader_get_last_error ())
7945 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7946 GENERIC_SHARING_FAILURE (CEE_JMP);
7948 if (mono_security_cas_enabled ())
7949 CHECK_CFG_EXCEPTION;
7951 emit_instrumentation_call (cfg, mono_profiler_method_leave);
7953 if (ARCH_HAVE_OP_TAIL_CALL) {
7954 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7957 /* Handle tail calls similarly to calls */
7958 n = fsig->param_count + fsig->hasthis;
7962 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7963 call->method = cmethod;
7964 call->tail_call = TRUE;
7965 call->signature = mono_method_signature (cmethod);
7966 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7967 call->inst.inst_p0 = cmethod;
7968 for (i = 0; i < n; ++i)
7969 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7971 mono_arch_emit_call (cfg, call);
7972 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
7973 MONO_ADD_INS (bblock, (MonoInst*)call);
7975 for (i = 0; i < num_args; ++i)
7976 /* Prevent arguments from being optimized away */
7977 arg_array [i]->flags |= MONO_INST_VOLATILE;
7979 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7980 ins = (MonoInst*)call;
7981 ins->inst_p0 = cmethod;
7982 MONO_ADD_INS (bblock, ins);
7986 start_new_bblock = 1;
7991 case CEE_CALLVIRT: {
7992 MonoInst *addr = NULL;
7993 MonoMethodSignature *fsig = NULL;
7995 int virtual = *ip == CEE_CALLVIRT;
7996 int calli = *ip == CEE_CALLI;
7997 gboolean pass_imt_from_rgctx = FALSE;
7998 MonoInst *imt_arg = NULL;
7999 MonoInst *keep_this_alive = NULL;
8000 gboolean pass_vtable = FALSE;
8001 gboolean pass_mrgctx = FALSE;
8002 MonoInst *vtable_arg = NULL;
8003 gboolean check_this = FALSE;
8004 gboolean supported_tail_call = FALSE;
8005 gboolean tail_call = FALSE;
8006 gboolean need_seq_point = FALSE;
8007 guint32 call_opcode = *ip;
8008 gboolean emit_widen = TRUE;
8009 gboolean push_res = TRUE;
8010 gboolean skip_ret = FALSE;
8011 gboolean delegate_invoke = FALSE;
8014 token = read32 (ip + 1);
8019 //GSHAREDVT_FAILURE (*ip);
8024 fsig = mini_get_signature (method, token, generic_context);
8025 n = fsig->param_count + fsig->hasthis;
8027 if (method->dynamic && fsig->pinvoke) {
8031 * This is a call through a function pointer using a pinvoke
8032 * signature. Have to create a wrapper and call that instead.
8033 * FIXME: This is very slow, need to create a wrapper at JIT time
8034 * instead based on the signature.
8036 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8037 EMIT_NEW_PCONST (cfg, args [1], fsig);
8039 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8042 MonoMethod *cil_method;
8044 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8045 cil_method = cmethod;
8047 if (constrained_call) {
8048 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8049 if (cfg->verbose_level > 2)
8050 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8051 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8052 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8053 cfg->generic_sharing_context)) {
8054 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
8057 if (cfg->verbose_level > 2)
8058 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8060 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8062 * This is needed since get_method_constrained can't find
8063 * the method in klass representing a type var.
8064 * The type var is guaranteed to be a reference type in this
8067 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8068 g_assert (!cmethod->klass->valuetype);
8070 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
8075 if (!cmethod || mono_loader_get_last_error ())
8077 if (!dont_verify && !cfg->skip_visibility) {
8078 MonoMethod *target_method = cil_method;
8079 if (method->is_inflated) {
8080 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8082 if (!mono_method_can_access_method (method_definition, target_method) &&
8083 !mono_method_can_access_method (method, cil_method))
8084 METHOD_ACCESS_FAILURE (method, cil_method);
8087 if (mono_security_core_clr_enabled ())
8088 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8090 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8091 /* MS.NET seems to silently convert this to a callvirt */
8096 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8097 * converts to a callvirt.
8099 * tests/bug-515884.il is an example of this behavior
8101 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8102 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8103 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8107 if (!cmethod->klass->inited)
8108 if (!mono_class_init (cmethod->klass))
8109 TYPE_LOAD_ERROR (cmethod->klass);
8111 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8112 mini_class_is_system_array (cmethod->klass)) {
8113 array_rank = cmethod->klass->rank;
8114 fsig = mono_method_signature (cmethod);
8116 fsig = mono_method_signature (cmethod);
8121 if (fsig->pinvoke) {
8122 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8123 check_for_pending_exc, cfg->compile_aot);
8124 fsig = mono_method_signature (wrapper);
8125 } else if (constrained_call) {
8126 fsig = mono_method_signature (cmethod);
8128 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
8132 mono_save_token_info (cfg, image, token, cil_method);
8134 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8136 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
8137 * foo (bar (), baz ())
8138 * works correctly. MS does this also:
8139 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
8140 * The problem with this approach is that the debugger will stop after all calls returning a value,
8141 * even for simple cases, like:
8144 /* Special case a few common successor opcodes */
8145 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8146 need_seq_point = TRUE;
8149 n = fsig->param_count + fsig->hasthis;
8151 /* Don't support calls made using type arguments for now */
8153 if (cfg->gsharedvt) {
8154 if (mini_is_gsharedvt_signature (cfg, fsig))
8155 GSHAREDVT_FAILURE (*ip);
8159 if (mono_security_cas_enabled ()) {
8160 if (check_linkdemand (cfg, method, cmethod))
8161 INLINE_FAILURE ("linkdemand");
8162 CHECK_CFG_EXCEPTION;
8165 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8166 g_assert_not_reached ();
8169 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8172 if (!cfg->generic_sharing_context && cmethod)
8173 g_assert (!mono_method_check_context_used (cmethod));
8177 //g_assert (!virtual || fsig->hasthis);
8181 if (constrained_call) {
8182 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8184 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8186 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8187 /* The 'Own method' case below */
8188 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8189 /* 'The type parameter is instantiated as a reference type' case below. */
8190 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8191 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8192 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8193 MonoInst *args [16];
8196 * This case handles calls to
8197 * - object:ToString()/Equals()/GetHashCode(),
8198 * - System.IComparable<T>:CompareTo()
8199 * - System.IEquatable<T>:Equals ()
8200 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8204 if (mono_method_check_context_used (cmethod))
8205 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8207 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8208 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8210 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8211 if (fsig->hasthis && fsig->param_count) {
8212 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8213 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8214 ins->dreg = alloc_preg (cfg);
8215 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8216 MONO_ADD_INS (cfg->cbb, ins);
8219 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8222 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8224 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8225 addr_reg = ins->dreg;
8226 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8228 EMIT_NEW_ICONST (cfg, args [3], 0);
8229 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8232 EMIT_NEW_ICONST (cfg, args [3], 0);
8233 EMIT_NEW_ICONST (cfg, args [4], 0);
8235 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8238 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8239 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8240 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8244 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8245 MONO_ADD_INS (cfg->cbb, add);
8247 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8248 MONO_ADD_INS (cfg->cbb, ins);
8249 /* ins represents the call result */
8254 GSHAREDVT_FAILURE (*ip);
8258 * We have the `constrained.' prefix opcode.
8260 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8262 * The type parameter is instantiated as a valuetype,
8263 * but that type doesn't override the method we're
8264 * calling, so we need to box `this'.
8266 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8267 ins->klass = constrained_call;
8268 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8269 CHECK_CFG_EXCEPTION;
8270 } else if (!constrained_call->valuetype) {
8271 int dreg = alloc_ireg_ref (cfg);
8274 * The type parameter is instantiated as a reference
8275 * type. We have a managed pointer on the stack, so
8276 * we need to dereference it here.
8278 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8279 ins->type = STACK_OBJ;
8282 if (cmethod->klass->valuetype) {
8285 /* Interface method */
8288 mono_class_setup_vtable (constrained_call);
8289 CHECK_TYPELOAD (constrained_call);
8290 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8292 TYPE_LOAD_ERROR (constrained_call);
8293 slot = mono_method_get_vtable_slot (cmethod);
8295 TYPE_LOAD_ERROR (cmethod->klass);
8296 cmethod = constrained_call->vtable [ioffset + slot];
8298 if (cmethod->klass == mono_defaults.enum_class) {
8299 /* Enum implements some interfaces, so treat this as the first case */
8300 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8301 ins->klass = constrained_call;
8302 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8303 CHECK_CFG_EXCEPTION;
8308 constrained_call = NULL;
8311 if (!calli && check_call_signature (cfg, fsig, sp))
8314 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8315 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8316 delegate_invoke = TRUE;
8319 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8321 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8322 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8330 * If the callee is a shared method, then its static cctor
8331 * might not get called after the call was patched.
8333 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8334 emit_generic_class_init (cfg, cmethod->klass);
8335 CHECK_TYPELOAD (cmethod->klass);
8339 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8341 if (cfg->generic_sharing_context && cmethod) {
8342 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8344 context_used = mini_method_check_context_used (cfg, cmethod);
8346 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8347 /* Generic method interface
8348 calls are resolved via a
8349 helper function and don't
8351 if (!cmethod_context || !cmethod_context->method_inst)
8352 pass_imt_from_rgctx = TRUE;
8356 * If a shared method calls another
8357 * shared method then the caller must
8358 * have a generic sharing context
8359 * because the magic trampoline
8360 * requires it. FIXME: We shouldn't
8361 * have to force the vtable/mrgctx
8362 * variable here. Instead there
8363 * should be a flag in the cfg to
8364 * request a generic sharing context.
8367 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8368 mono_get_vtable_var (cfg);
8373 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8375 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8377 CHECK_TYPELOAD (cmethod->klass);
8378 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8383 g_assert (!vtable_arg);
8385 if (!cfg->compile_aot) {
8387 * emit_get_rgctx_method () calls mono_class_vtable () so check
8388 * for type load errors before.
8390 mono_class_setup_vtable (cmethod->klass);
8391 CHECK_TYPELOAD (cmethod->klass);
8394 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8396 /* !marshalbyref is needed to properly handle generic methods + remoting */
8397 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8398 MONO_METHOD_IS_FINAL (cmethod)) &&
8399 !mono_class_is_marshalbyref (cmethod->klass)) {
8406 if (pass_imt_from_rgctx) {
8407 g_assert (!pass_vtable);
8410 imt_arg = emit_get_rgctx_method (cfg, context_used,
8411 cmethod, MONO_RGCTX_INFO_METHOD);
8415 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8417 /* Calling virtual generic methods */
8418 if (cmethod && virtual &&
8419 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8420 !(MONO_METHOD_IS_FINAL (cmethod) &&
8421 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8422 fsig->generic_param_count &&
8423 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8424 MonoInst *this_temp, *this_arg_temp, *store;
8425 MonoInst *iargs [4];
8426 gboolean use_imt = FALSE;
8428 g_assert (fsig->is_inflated);
8430 /* Prevent inlining of methods that contain indirect calls */
8431 INLINE_FAILURE ("virtual generic call");
8433 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8434 GSHAREDVT_FAILURE (*ip);
8436 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8437 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8442 g_assert (!imt_arg);
8444 g_assert (cmethod->is_inflated);
8445 imt_arg = emit_get_rgctx_method (cfg, context_used,
8446 cmethod, MONO_RGCTX_INFO_METHOD);
8447 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8449 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8450 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8451 MONO_ADD_INS (bblock, store);
8453 /* FIXME: This should be a managed pointer */
8454 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8456 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8457 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8458 cmethod, MONO_RGCTX_INFO_METHOD);
8459 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8460 addr = mono_emit_jit_icall (cfg,
8461 mono_helper_compile_generic_method, iargs);
8463 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8465 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8472 * Implement a workaround for the inherent races involved in locking:
8478 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8479 * try block, the Exit () won't be executed, see:
8480 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8481 * To work around this, we extend such try blocks to include the last x bytes
8482 * of the Monitor.Enter () call.
8484 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8485 MonoBasicBlock *tbb;
8487 GET_BBLOCK (cfg, tbb, ip + 5);
8489 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8490 * from Monitor.Enter like ArgumentNullException.
8492 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8493 /* Mark this bblock as needing to be extended */
8494 tbb->extend_try_block = TRUE;
8498 /* Conversion to a JIT intrinsic */
8499 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8501 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8502 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8509 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8510 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8511 mono_method_check_inlining (cfg, cmethod)) {
8513 gboolean always = FALSE;
8515 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8516 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8517 /* Prevent inlining of methods that call wrappers */
8518 INLINE_FAILURE ("wrapper call");
8519 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8523 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8525 cfg->real_offset += 5;
8527 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8528 /* *sp is already set by inline_method */
8533 inline_costs += costs;
8539 /* Tail recursion elimination */
8540 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8541 gboolean has_vtargs = FALSE;
8544 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8545 INLINE_FAILURE ("tail call");
8547 /* keep it simple */
8548 for (i = fsig->param_count - 1; i >= 0; i--) {
8549 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8554 for (i = 0; i < n; ++i)
8555 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8556 MONO_INST_NEW (cfg, ins, OP_BR);
8557 MONO_ADD_INS (bblock, ins);
8558 tblock = start_bblock->out_bb [0];
8559 link_bblock (cfg, bblock, tblock);
8560 ins->inst_target_bb = tblock;
8561 start_new_bblock = 1;
8563 /* skip the CEE_RET, too */
8564 if (ip_in_bb (cfg, bblock, ip + 5))
8571 inline_costs += 10 * num_calls++;
8574 * Making generic calls out of gsharedvt methods.
8575 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8576 * patching gshared method addresses into a gsharedvt method.
8578 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8579 MonoRgctxInfoType info_type;
8582 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8583 //GSHAREDVT_FAILURE (*ip);
8584 // disable for possible remoting calls
8585 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8586 GSHAREDVT_FAILURE (*ip);
8587 if (fsig->generic_param_count) {
8588 /* virtual generic call */
8589 g_assert (mono_use_imt);
8590 g_assert (!imt_arg);
8591 /* Same as the virtual generic case above */
8592 imt_arg = emit_get_rgctx_method (cfg, context_used,
8593 cmethod, MONO_RGCTX_INFO_METHOD);
8594 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8596 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8597 /* This can happen when we call a fully instantiated iface method */
8598 imt_arg = emit_get_rgctx_method (cfg, context_used,
8599 cmethod, MONO_RGCTX_INFO_METHOD);
8604 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8605 /* test_0_multi_dim_arrays () in gshared.cs */
8606 GSHAREDVT_FAILURE (*ip);
8608 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8609 keep_this_alive = sp [0];
8611 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8612 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8614 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8615 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8617 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8619 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8621 * We pass the address to the gsharedvt trampoline in the rgctx reg
8623 MonoInst *callee = addr;
8625 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8627 GSHAREDVT_FAILURE (*ip);
8629 addr = emit_get_rgctx_sig (cfg, context_used,
8630 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8631 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8635 /* Generic sharing */
8638 * Use this if the callee is gsharedvt sharable too, since
8639 * at runtime we might find an instantiation so the call cannot
8640 * be patched (the 'no_patch' code path in mini-trampolines.c).
8642 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8643 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8644 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8645 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8646 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8647 INLINE_FAILURE ("gshared");
8649 g_assert (cfg->generic_sharing_context && cmethod);
8653 * We are compiling a call to a
8654 * generic method from shared code,
8655 * which means that we have to look up
8656 * the method in the rgctx and do an
8660 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8662 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8663 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8667 /* Indirect calls */
8669 if (call_opcode == CEE_CALL)
8670 g_assert (context_used);
8671 else if (call_opcode == CEE_CALLI)
8672 g_assert (!vtable_arg);
8674 /* FIXME: what the hell is this??? */
8675 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8676 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8678 /* Prevent inlining of methods with indirect calls */
8679 INLINE_FAILURE ("indirect call");
8681 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8686 * Instead of emitting an indirect call, emit a direct call
8687 * with the contents of the aotconst as the patch info.
8689 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8690 info_type = addr->inst_c1;
8691 info_data = addr->inst_p0;
8693 info_type = addr->inst_right->inst_c1;
8694 info_data = addr->inst_right->inst_left;
8697 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8698 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8703 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8711 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8712 MonoInst *val = sp [fsig->param_count];
8714 if (val->type == STACK_OBJ) {
8715 MonoInst *iargs [2];
8720 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8723 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8724 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8725 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8726 emit_write_barrier (cfg, addr, val);
8727 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8728 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8731 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8732 if (!cmethod->klass->element_class->valuetype && !readonly)
8733 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8734 CHECK_TYPELOAD (cmethod->klass);
8737 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8740 g_assert_not_reached ();
8747 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8751 /* Tail prefix / tail call optimization */
8753 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8754 /* FIXME: runtime generic context pointer for jumps? */
8755 /* FIXME: handle this for generic sharing eventually */
8756 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8757 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8758 supported_tail_call = TRUE;
8760 if (supported_tail_call) {
8763 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8764 INLINE_FAILURE ("tail call");
8766 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8768 if (ARCH_HAVE_OP_TAIL_CALL) {
8769 /* Handle tail calls similarly to normal calls */
8772 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8774 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8775 call->tail_call = TRUE;
8776 call->method = cmethod;
8777 call->signature = mono_method_signature (cmethod);
8780 * We implement tail calls by storing the actual arguments into the
8781 * argument variables, then emitting a CEE_JMP.
8783 for (i = 0; i < n; ++i) {
8784 /* Prevent argument from being register allocated */
8785 arg_array [i]->flags |= MONO_INST_VOLATILE;
8786 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8788 ins = (MonoInst*)call;
8789 ins->inst_p0 = cmethod;
8790 ins->inst_p1 = arg_array [0];
8791 MONO_ADD_INS (bblock, ins);
8792 link_bblock (cfg, bblock, end_bblock);
8793 start_new_bblock = 1;
8795 // FIXME: Eliminate unreachable epilogs
8798 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8799 * only reachable from this call.
8801 GET_BBLOCK (cfg, tblock, ip + 5);
8802 if (tblock == bblock || tblock->in_count == 0)
8811 * Synchronized wrappers.
8812 * Its hard to determine where to replace a method with its synchronized
8813 * wrapper without causing an infinite recursion. The current solution is
8814 * to add the synchronized wrapper in the trampolines, and to
8815 * change the called method to a dummy wrapper, and resolve that wrapper
8816 * to the real method in mono_jit_compile_method ().
8818 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8819 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8820 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8821 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8825 INLINE_FAILURE ("call");
8826 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8827 imt_arg, vtable_arg);
8830 link_bblock (cfg, bblock, end_bblock);
8831 start_new_bblock = 1;
8833 // FIXME: Eliminate unreachable epilogs
8836 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8837 * only reachable from this call.
8839 GET_BBLOCK (cfg, tblock, ip + 5);
8840 if (tblock == bblock || tblock->in_count == 0)
8847 /* End of call, INS should contain the result of the call, if any */
8849 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8852 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8857 if (keep_this_alive) {
8858 MonoInst *dummy_use;
8860 /* See mono_emit_method_call_full () */
8861 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8864 CHECK_CFG_EXCEPTION;
8868 g_assert (*ip == CEE_RET);
8872 constrained_call = NULL;
8874 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8878 if (cfg->method != method) {
8879 /* return from inlined method */
8881 * If in_count == 0, that means the ret is unreachable due to
8882 * being preceeded by a throw. In that case, inline_method () will
8883 * handle setting the return value
8884 * (test case: test_0_inline_throw ()).
8886 if (return_var && cfg->cbb->in_count) {
8887 MonoType *ret_type = mono_method_signature (method)->ret;
8893 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8896 //g_assert (returnvar != -1);
8897 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8898 cfg->ret_var_set = TRUE;
8901 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8903 if (cfg->lmf_var && cfg->cbb->in_count)
8907 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8909 if (seq_points && !sym_seq_points) {
8911 * Place a seq point here too even through the IL stack is not
8912 * empty, so a step over on
8915 * will work correctly.
8917 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8918 MONO_ADD_INS (cfg->cbb, ins);
8921 g_assert (!return_var);
8925 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8928 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8931 if (!cfg->vret_addr) {
8934 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8936 EMIT_NEW_RETLOADA (cfg, ret_addr);
8938 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8939 ins->klass = mono_class_from_mono_type (ret_type);
8942 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8943 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8944 MonoInst *iargs [1];
8948 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8949 mono_arch_emit_setret (cfg, method, conv);
8951 mono_arch_emit_setret (cfg, method, *sp);
8954 mono_arch_emit_setret (cfg, method, *sp);
8959 if (sp != stack_start)
8961 MONO_INST_NEW (cfg, ins, OP_BR);
8963 ins->inst_target_bb = end_bblock;
8964 MONO_ADD_INS (bblock, ins);
8965 link_bblock (cfg, bblock, end_bblock);
8966 start_new_bblock = 1;
8970 MONO_INST_NEW (cfg, ins, OP_BR);
8972 target = ip + 1 + (signed char)(*ip);
8974 GET_BBLOCK (cfg, tblock, target);
8975 link_bblock (cfg, bblock, tblock);
8976 ins->inst_target_bb = tblock;
8977 if (sp != stack_start) {
8978 handle_stack_args (cfg, stack_start, sp - stack_start);
8980 CHECK_UNVERIFIABLE (cfg);
8982 MONO_ADD_INS (bblock, ins);
8983 start_new_bblock = 1;
8984 inline_costs += BRANCH_COST;
8998 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9000 target = ip + 1 + *(signed char*)ip;
9006 inline_costs += BRANCH_COST;
9010 MONO_INST_NEW (cfg, ins, OP_BR);
9013 target = ip + 4 + (gint32)read32(ip);
9015 GET_BBLOCK (cfg, tblock, target);
9016 link_bblock (cfg, bblock, tblock);
9017 ins->inst_target_bb = tblock;
9018 if (sp != stack_start) {
9019 handle_stack_args (cfg, stack_start, sp - stack_start);
9021 CHECK_UNVERIFIABLE (cfg);
9024 MONO_ADD_INS (bblock, ins);
9026 start_new_bblock = 1;
9027 inline_costs += BRANCH_COST;
9034 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9035 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9036 guint32 opsize = is_short ? 1 : 4;
9038 CHECK_OPSIZE (opsize);
9040 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9043 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9048 GET_BBLOCK (cfg, tblock, target);
9049 link_bblock (cfg, bblock, tblock);
9050 GET_BBLOCK (cfg, tblock, ip);
9051 link_bblock (cfg, bblock, tblock);
9053 if (sp != stack_start) {
9054 handle_stack_args (cfg, stack_start, sp - stack_start);
9055 CHECK_UNVERIFIABLE (cfg);
9058 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9059 cmp->sreg1 = sp [0]->dreg;
9060 type_from_op (cmp, sp [0], NULL);
9063 #if SIZEOF_REGISTER == 4
9064 if (cmp->opcode == OP_LCOMPARE_IMM) {
9065 /* Convert it to OP_LCOMPARE */
9066 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9067 ins->type = STACK_I8;
9068 ins->dreg = alloc_dreg (cfg, STACK_I8);
9070 MONO_ADD_INS (bblock, ins);
9071 cmp->opcode = OP_LCOMPARE;
9072 cmp->sreg2 = ins->dreg;
9075 MONO_ADD_INS (bblock, cmp);
9077 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9078 type_from_op (ins, sp [0], NULL);
9079 MONO_ADD_INS (bblock, ins);
9080 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9081 GET_BBLOCK (cfg, tblock, target);
9082 ins->inst_true_bb = tblock;
9083 GET_BBLOCK (cfg, tblock, ip);
9084 ins->inst_false_bb = tblock;
9085 start_new_bblock = 2;
9088 inline_costs += BRANCH_COST;
9103 MONO_INST_NEW (cfg, ins, *ip);
9105 target = ip + 4 + (gint32)read32(ip);
9111 inline_costs += BRANCH_COST;
9115 MonoBasicBlock **targets;
9116 MonoBasicBlock *default_bblock;
9117 MonoJumpInfoBBTable *table;
9118 int offset_reg = alloc_preg (cfg);
9119 int target_reg = alloc_preg (cfg);
9120 int table_reg = alloc_preg (cfg);
9121 int sum_reg = alloc_preg (cfg);
9122 gboolean use_op_switch;
9126 n = read32 (ip + 1);
9129 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9133 CHECK_OPSIZE (n * sizeof (guint32));
9134 target = ip + n * sizeof (guint32);
9136 GET_BBLOCK (cfg, default_bblock, target);
9137 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9139 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9140 for (i = 0; i < n; ++i) {
9141 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9142 targets [i] = tblock;
9143 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9147 if (sp != stack_start) {
9149 * Link the current bb with the targets as well, so handle_stack_args
9150 * will set their in_stack correctly.
9152 link_bblock (cfg, bblock, default_bblock);
9153 for (i = 0; i < n; ++i)
9154 link_bblock (cfg, bblock, targets [i]);
9156 handle_stack_args (cfg, stack_start, sp - stack_start);
9158 CHECK_UNVERIFIABLE (cfg);
9161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9165 for (i = 0; i < n; ++i)
9166 link_bblock (cfg, bblock, targets [i]);
9168 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9169 table->table = targets;
9170 table->table_size = n;
9172 use_op_switch = FALSE;
9174 /* ARM implements SWITCH statements differently */
9175 /* FIXME: Make it use the generic implementation */
9176 if (!cfg->compile_aot)
9177 use_op_switch = TRUE;
9180 if (COMPILE_LLVM (cfg))
9181 use_op_switch = TRUE;
9183 cfg->cbb->has_jump_table = 1;
9185 if (use_op_switch) {
9186 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9187 ins->sreg1 = src1->dreg;
9188 ins->inst_p0 = table;
9189 ins->inst_many_bb = targets;
9190 ins->klass = GUINT_TO_POINTER (n);
9191 MONO_ADD_INS (cfg->cbb, ins);
9193 if (sizeof (gpointer) == 8)
9194 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9198 #if SIZEOF_REGISTER == 8
9199 /* The upper word might not be zero, and we add it to a 64 bit address later */
9200 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9203 if (cfg->compile_aot) {
9204 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9206 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9207 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9208 ins->inst_p0 = table;
9209 ins->dreg = table_reg;
9210 MONO_ADD_INS (cfg->cbb, ins);
9213 /* FIXME: Use load_memindex */
9214 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9215 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9216 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9218 start_new_bblock = 1;
9219 inline_costs += (BRANCH_COST * 2);
9239 dreg = alloc_freg (cfg);
9242 dreg = alloc_lreg (cfg);
9245 dreg = alloc_ireg_ref (cfg);
9248 dreg = alloc_preg (cfg);
9251 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9252 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9253 ins->flags |= ins_flag;
9254 MONO_ADD_INS (bblock, ins);
9256 if (ins_flag & MONO_INST_VOLATILE) {
9257 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9258 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9259 emit_memory_barrier (cfg, FullBarrier);
9275 if (ins_flag & MONO_INST_VOLATILE) {
9276 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9277 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9278 emit_memory_barrier (cfg, FullBarrier);
9281 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9282 ins->flags |= ins_flag;
9285 MONO_ADD_INS (bblock, ins);
9287 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9288 emit_write_barrier (cfg, sp [0], sp [1]);
9297 MONO_INST_NEW (cfg, ins, (*ip));
9299 ins->sreg1 = sp [0]->dreg;
9300 ins->sreg2 = sp [1]->dreg;
9301 type_from_op (ins, sp [0], sp [1]);
9303 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9305 /* Use the immediate opcodes if possible */
9306 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9307 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9308 if (imm_opcode != -1) {
9309 ins->opcode = imm_opcode;
9310 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9313 NULLIFY_INS (sp [1]);
9317 MONO_ADD_INS ((cfg)->cbb, (ins));
9319 *sp++ = mono_decompose_opcode (cfg, ins);
9336 MONO_INST_NEW (cfg, ins, (*ip));
9338 ins->sreg1 = sp [0]->dreg;
9339 ins->sreg2 = sp [1]->dreg;
9340 type_from_op (ins, sp [0], sp [1]);
9342 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9343 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9345 /* FIXME: Pass opcode to is_inst_imm */
9347 /* Use the immediate opcodes if possible */
9348 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9351 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9352 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9353 /* Keep emulated opcodes which are optimized away later */
9354 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9355 imm_opcode = mono_op_to_op_imm (ins->opcode);
9358 if (imm_opcode != -1) {
9359 ins->opcode = imm_opcode;
9360 if (sp [1]->opcode == OP_I8CONST) {
9361 #if SIZEOF_REGISTER == 8
9362 ins->inst_imm = sp [1]->inst_l;
9364 ins->inst_ls_word = sp [1]->inst_ls_word;
9365 ins->inst_ms_word = sp [1]->inst_ms_word;
9369 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9372 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9373 if (sp [1]->next == NULL)
9374 NULLIFY_INS (sp [1]);
9377 MONO_ADD_INS ((cfg)->cbb, (ins));
9379 *sp++ = mono_decompose_opcode (cfg, ins);
9392 case CEE_CONV_OVF_I8:
9393 case CEE_CONV_OVF_U8:
9397 /* Special case this earlier so we have long constants in the IR */
9398 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9399 int data = sp [-1]->inst_c0;
9400 sp [-1]->opcode = OP_I8CONST;
9401 sp [-1]->type = STACK_I8;
9402 #if SIZEOF_REGISTER == 8
9403 if ((*ip) == CEE_CONV_U8)
9404 sp [-1]->inst_c0 = (guint32)data;
9406 sp [-1]->inst_c0 = data;
9408 sp [-1]->inst_ls_word = data;
9409 if ((*ip) == CEE_CONV_U8)
9410 sp [-1]->inst_ms_word = 0;
9412 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9414 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9421 case CEE_CONV_OVF_I4:
9422 case CEE_CONV_OVF_I1:
9423 case CEE_CONV_OVF_I2:
9424 case CEE_CONV_OVF_I:
9425 case CEE_CONV_OVF_U:
9428 if (sp [-1]->type == STACK_R8) {
9429 ADD_UNOP (CEE_CONV_OVF_I8);
9436 case CEE_CONV_OVF_U1:
9437 case CEE_CONV_OVF_U2:
9438 case CEE_CONV_OVF_U4:
9441 if (sp [-1]->type == STACK_R8) {
9442 ADD_UNOP (CEE_CONV_OVF_U8);
9449 case CEE_CONV_OVF_I1_UN:
9450 case CEE_CONV_OVF_I2_UN:
9451 case CEE_CONV_OVF_I4_UN:
9452 case CEE_CONV_OVF_I8_UN:
9453 case CEE_CONV_OVF_U1_UN:
9454 case CEE_CONV_OVF_U2_UN:
9455 case CEE_CONV_OVF_U4_UN:
9456 case CEE_CONV_OVF_U8_UN:
9457 case CEE_CONV_OVF_I_UN:
9458 case CEE_CONV_OVF_U_UN:
9465 CHECK_CFG_EXCEPTION;
9469 case CEE_ADD_OVF_UN:
9471 case CEE_MUL_OVF_UN:
9473 case CEE_SUB_OVF_UN:
9479 GSHAREDVT_FAILURE (*ip);
9482 token = read32 (ip + 1);
9483 klass = mini_get_class (method, token, generic_context);
9484 CHECK_TYPELOAD (klass);
9486 if (generic_class_is_reference_type (cfg, klass)) {
9487 MonoInst *store, *load;
9488 int dreg = alloc_ireg_ref (cfg);
9490 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9491 load->flags |= ins_flag;
9492 MONO_ADD_INS (cfg->cbb, load);
9494 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9495 store->flags |= ins_flag;
9496 MONO_ADD_INS (cfg->cbb, store);
9498 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9499 emit_write_barrier (cfg, sp [0], sp [1]);
9501 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9513 token = read32 (ip + 1);
9514 klass = mini_get_class (method, token, generic_context);
9515 CHECK_TYPELOAD (klass);
9517 /* Optimize the common ldobj+stloc combination */
9527 loc_index = ip [5] - CEE_STLOC_0;
9534 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9535 CHECK_LOCAL (loc_index);
9537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9538 ins->dreg = cfg->locals [loc_index]->dreg;
9539 ins->flags |= ins_flag;
9542 if (ins_flag & MONO_INST_VOLATILE) {
9543 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9544 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9545 emit_memory_barrier (cfg, FullBarrier);
9551 /* Optimize the ldobj+stobj combination */
9552 /* The reference case ends up being a load+store anyway */
9553 /* Skip this if the operation is volatile. */
9554 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9559 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9566 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9567 ins->flags |= ins_flag;
9570 if (ins_flag & MONO_INST_VOLATILE) {
9571 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9572 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9573 emit_memory_barrier (cfg, FullBarrier);
9582 CHECK_STACK_OVF (1);
9584 n = read32 (ip + 1);
9586 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9587 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9588 ins->type = STACK_OBJ;
9591 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9592 MonoInst *iargs [1];
9594 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9595 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9597 if (cfg->opt & MONO_OPT_SHARED) {
9598 MonoInst *iargs [3];
9600 if (cfg->compile_aot) {
9601 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9603 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9604 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9605 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9606 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9607 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9609 if (bblock->out_of_line) {
9610 MonoInst *iargs [2];
9612 if (image == mono_defaults.corlib) {
9614 * Avoid relocations in AOT and save some space by using a
9615 * version of helper_ldstr specialized to mscorlib.
9617 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9618 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9620 /* Avoid creating the string object */
9621 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9622 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9623 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9627 if (cfg->compile_aot) {
9628 NEW_LDSTRCONST (cfg, ins, image, n);
9630 MONO_ADD_INS (bblock, ins);
9633 NEW_PCONST (cfg, ins, NULL);
9634 ins->type = STACK_OBJ;
9635 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9637 OUT_OF_MEMORY_FAILURE;
9640 MONO_ADD_INS (bblock, ins);
9649 MonoInst *iargs [2];
9650 MonoMethodSignature *fsig;
9653 MonoInst *vtable_arg = NULL;
9656 token = read32 (ip + 1);
9657 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9658 if (!cmethod || mono_loader_get_last_error ())
9660 fsig = mono_method_get_signature (cmethod, image, token);
9664 mono_save_token_info (cfg, image, token, cmethod);
9666 if (!mono_class_init (cmethod->klass))
9667 TYPE_LOAD_ERROR (cmethod->klass);
9669 context_used = mini_method_check_context_used (cfg, cmethod);
9671 if (mono_security_cas_enabled ()) {
9672 if (check_linkdemand (cfg, method, cmethod))
9673 INLINE_FAILURE ("linkdemand");
9674 CHECK_CFG_EXCEPTION;
9675 } else if (mono_security_core_clr_enabled ()) {
9676 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9679 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9680 emit_generic_class_init (cfg, cmethod->klass);
9681 CHECK_TYPELOAD (cmethod->klass);
9685 if (cfg->gsharedvt) {
9686 if (mini_is_gsharedvt_variable_signature (sig))
9687 GSHAREDVT_FAILURE (*ip);
9691 n = fsig->param_count;
9695 * Generate smaller code for the common newobj <exception> instruction in
9696 * argument checking code.
9698 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9699 is_exception_class (cmethod->klass) && n <= 2 &&
9700 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9701 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9702 MonoInst *iargs [3];
9706 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9709 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9713 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9718 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9721 g_assert_not_reached ();
9729 /* move the args to allow room for 'this' in the first position */
9735 /* check_call_signature () requires sp[0] to be set */
9736 this_ins.type = STACK_OBJ;
9738 if (check_call_signature (cfg, fsig, sp))
9743 if (mini_class_is_system_array (cmethod->klass)) {
9744 *sp = emit_get_rgctx_method (cfg, context_used,
9745 cmethod, MONO_RGCTX_INFO_METHOD);
9747 /* Avoid varargs in the common case */
9748 if (fsig->param_count == 1)
9749 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9750 else if (fsig->param_count == 2)
9751 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9752 else if (fsig->param_count == 3)
9753 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9754 else if (fsig->param_count == 4)
9755 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9757 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9758 } else if (cmethod->string_ctor) {
9759 g_assert (!context_used);
9760 g_assert (!vtable_arg);
9761 /* we simply pass a null pointer */
9762 EMIT_NEW_PCONST (cfg, *sp, NULL);
9763 /* now call the string ctor */
9764 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9766 if (cmethod->klass->valuetype) {
9767 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9768 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9769 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9774 * The code generated by mini_emit_virtual_call () expects
9775 * iargs [0] to be a boxed instance, but luckily the vcall
9776 * will be transformed into a normal call there.
9778 } else if (context_used) {
9779 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9782 MonoVTable *vtable = NULL;
9784 if (!cfg->compile_aot)
9785 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9786 CHECK_TYPELOAD (cmethod->klass);
9789 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9790 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9791 * As a workaround, we call class cctors before allocating objects.
9793 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9794 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9795 if (cfg->verbose_level > 2)
9796 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9797 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9800 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9803 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9806 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9808 /* Now call the actual ctor */
9809 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9810 CHECK_CFG_EXCEPTION;
9813 if (alloc == NULL) {
9815 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9816 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9830 token = read32 (ip + 1);
9831 klass = mini_get_class (method, token, generic_context);
9832 CHECK_TYPELOAD (klass);
9833 if (sp [0]->type != STACK_OBJ)
9836 context_used = mini_class_check_context_used (cfg, klass);
9838 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9839 *sp = emit_castclass_with_cache_nonshared (cfg, sp [0], klass, &bblock);
9843 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9844 MonoMethod *mono_castclass;
9845 MonoInst *iargs [1];
9848 mono_castclass = mono_marshal_get_castclass (klass);
9851 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9852 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9853 iargs, ip, cfg->real_offset, TRUE, &bblock);
9854 reset_cast_details (cfg);
9855 CHECK_CFG_EXCEPTION;
9856 g_assert (costs > 0);
9859 cfg->real_offset += 5;
9863 inline_costs += costs;
9866 ins = handle_castclass (cfg, klass, *sp, context_used);
9867 CHECK_CFG_EXCEPTION;
9877 token = read32 (ip + 1);
9878 klass = mini_get_class (method, token, generic_context);
9879 CHECK_TYPELOAD (klass);
9880 if (sp [0]->type != STACK_OBJ)
9883 context_used = mini_class_check_context_used (cfg, klass);
9885 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9886 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9893 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9896 if (cfg->compile_aot)
9897 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9899 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9901 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9904 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9905 MonoMethod *mono_isinst;
9906 MonoInst *iargs [1];
9909 mono_isinst = mono_marshal_get_isinst (klass);
9912 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9913 iargs, ip, cfg->real_offset, TRUE, &bblock);
9914 CHECK_CFG_EXCEPTION;
9915 g_assert (costs > 0);
9918 cfg->real_offset += 5;
9922 inline_costs += costs;
9925 ins = handle_isinst (cfg, klass, *sp, context_used);
9926 CHECK_CFG_EXCEPTION;
9933 case CEE_UNBOX_ANY: {
9937 token = read32 (ip + 1);
9938 klass = mini_get_class (method, token, generic_context);
9939 CHECK_TYPELOAD (klass);
9941 mono_save_token_info (cfg, image, token, klass);
9943 context_used = mini_class_check_context_used (cfg, klass);
9945 if (mini_is_gsharedvt_klass (cfg, klass)) {
9946 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9954 if (generic_class_is_reference_type (cfg, klass)) {
9955 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9956 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9957 *sp = emit_castclass_with_cache_nonshared (cfg, sp [0], klass, &bblock);
9961 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9962 MonoMethod *mono_castclass;
9963 MonoInst *iargs [1];
9966 mono_castclass = mono_marshal_get_castclass (klass);
9969 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9970 iargs, ip, cfg->real_offset, TRUE, &bblock);
9971 CHECK_CFG_EXCEPTION;
9972 g_assert (costs > 0);
9975 cfg->real_offset += 5;
9978 inline_costs += costs;
9980 ins = handle_castclass (cfg, klass, *sp, context_used);
9981 CHECK_CFG_EXCEPTION;
9989 if (mono_class_is_nullable (klass)) {
9990 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9997 ins = handle_unbox (cfg, klass, sp, context_used);
10003 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10016 token = read32 (ip + 1);
10017 klass = mini_get_class (method, token, generic_context);
10018 CHECK_TYPELOAD (klass);
10020 mono_save_token_info (cfg, image, token, klass);
10022 context_used = mini_class_check_context_used (cfg, klass);
10024 if (generic_class_is_reference_type (cfg, klass)) {
10030 if (klass == mono_defaults.void_class)
10032 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10034 /* frequent check in generic code: box (struct), brtrue */
10036 // FIXME: LLVM can't handle the inconsistent bb linking
10037 if (!mono_class_is_nullable (klass) &&
10038 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10039 (ip [5] == CEE_BRTRUE ||
10040 ip [5] == CEE_BRTRUE_S ||
10041 ip [5] == CEE_BRFALSE ||
10042 ip [5] == CEE_BRFALSE_S)) {
10043 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10045 MonoBasicBlock *true_bb, *false_bb;
10049 if (cfg->verbose_level > 3) {
10050 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10051 printf ("<box+brtrue opt>\n");
10056 case CEE_BRFALSE_S:
10059 target = ip + 1 + (signed char)(*ip);
10066 target = ip + 4 + (gint)(read32 (ip));
10070 g_assert_not_reached ();
10074 * We need to link both bblocks, since it is needed for handling stack
10075 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10076 * Branching to only one of them would lead to inconsistencies, so
10077 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10079 GET_BBLOCK (cfg, true_bb, target);
10080 GET_BBLOCK (cfg, false_bb, ip);
10082 mono_link_bblock (cfg, cfg->cbb, true_bb);
10083 mono_link_bblock (cfg, cfg->cbb, false_bb);
10085 if (sp != stack_start) {
10086 handle_stack_args (cfg, stack_start, sp - stack_start);
10088 CHECK_UNVERIFIABLE (cfg);
10091 if (COMPILE_LLVM (cfg)) {
10092 dreg = alloc_ireg (cfg);
10093 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10096 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10098 /* The JIT can't eliminate the iconst+compare */
10099 MONO_INST_NEW (cfg, ins, OP_BR);
10100 ins->inst_target_bb = is_true ? true_bb : false_bb;
10101 MONO_ADD_INS (cfg->cbb, ins);
10104 start_new_bblock = 1;
10108 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10110 CHECK_CFG_EXCEPTION;
10119 token = read32 (ip + 1);
10120 klass = mini_get_class (method, token, generic_context);
10121 CHECK_TYPELOAD (klass);
10123 mono_save_token_info (cfg, image, token, klass);
10125 context_used = mini_class_check_context_used (cfg, klass);
10127 if (mono_class_is_nullable (klass)) {
10130 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10131 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10135 ins = handle_unbox (cfg, klass, sp, context_used);
10148 MonoClassField *field;
10149 #ifndef DISABLE_REMOTING
10153 gboolean is_instance;
10155 gpointer addr = NULL;
10156 gboolean is_special_static;
10158 MonoInst *store_val = NULL;
10159 MonoInst *thread_ins;
10162 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10164 if (op == CEE_STFLD) {
10167 store_val = sp [1];
10172 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10174 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10177 if (op == CEE_STSFLD) {
10180 store_val = sp [0];
10185 token = read32 (ip + 1);
10186 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10187 field = mono_method_get_wrapper_data (method, token);
10188 klass = field->parent;
10191 field = mono_field_from_token (image, token, &klass, generic_context);
10195 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10196 FIELD_ACCESS_FAILURE (method, field);
10197 mono_class_init (klass);
10199 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10202 /* if the class is Critical then transparent code cannot access it's fields */
10203 if (!is_instance && mono_security_core_clr_enabled ())
10204 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10206 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10207 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10208 if (mono_security_core_clr_enabled ())
10209 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10213 * LDFLD etc. is usable on static fields as well, so convert those cases to
10216 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10228 g_assert_not_reached ();
10230 is_instance = FALSE;
10233 context_used = mini_class_check_context_used (cfg, klass);
10235 /* INSTANCE CASE */
10237 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10238 if (op == CEE_STFLD) {
10239 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10241 #ifndef DISABLE_REMOTING
10242 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10243 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10244 MonoInst *iargs [5];
10246 GSHAREDVT_FAILURE (op);
10248 iargs [0] = sp [0];
10249 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10250 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10251 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10253 iargs [4] = sp [1];
10255 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10256 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10257 iargs, ip, cfg->real_offset, TRUE, &bblock);
10258 CHECK_CFG_EXCEPTION;
10259 g_assert (costs > 0);
10261 cfg->real_offset += 5;
10263 inline_costs += costs;
10265 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10272 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10274 if (mini_is_gsharedvt_klass (cfg, klass)) {
10275 MonoInst *offset_ins;
10277 context_used = mini_class_check_context_used (cfg, klass);
10279 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10280 dreg = alloc_ireg_mp (cfg);
10281 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10282 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10283 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10285 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10287 if (sp [0]->opcode != OP_LDADDR)
10288 store->flags |= MONO_INST_FAULT;
10290 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10291 /* insert call to write barrier */
10295 dreg = alloc_ireg_mp (cfg);
10296 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10297 emit_write_barrier (cfg, ptr, sp [1]);
10300 store->flags |= ins_flag;
10307 #ifndef DISABLE_REMOTING
10308 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10309 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10310 MonoInst *iargs [4];
10312 GSHAREDVT_FAILURE (op);
10314 iargs [0] = sp [0];
10315 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10316 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10317 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10318 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10319 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10320 iargs, ip, cfg->real_offset, TRUE, &bblock);
10321 CHECK_CFG_EXCEPTION;
10322 g_assert (costs > 0);
10324 cfg->real_offset += 5;
10328 inline_costs += costs;
10330 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10336 if (sp [0]->type == STACK_VTYPE) {
10339 /* Have to compute the address of the variable */
10341 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10343 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10345 g_assert (var->klass == klass);
10347 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10351 if (op == CEE_LDFLDA) {
10352 if (is_magic_tls_access (field)) {
10353 GSHAREDVT_FAILURE (*ip);
10355 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10357 if (sp [0]->type == STACK_OBJ) {
10358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10359 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10362 dreg = alloc_ireg_mp (cfg);
10364 if (mini_is_gsharedvt_klass (cfg, klass)) {
10365 MonoInst *offset_ins;
10367 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10368 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10370 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10372 ins->klass = mono_class_from_mono_type (field->type);
10373 ins->type = STACK_MP;
10379 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10381 if (mini_is_gsharedvt_klass (cfg, klass)) {
10382 MonoInst *offset_ins;
10384 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10385 dreg = alloc_ireg_mp (cfg);
10386 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10387 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10389 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10391 load->flags |= ins_flag;
10392 if (sp [0]->opcode != OP_LDADDR)
10393 load->flags |= MONO_INST_FAULT;
10407 * We can only support shared generic static
10408 * field access on architectures where the
10409 * trampoline code has been extended to handle
10410 * the generic class init.
10412 #ifndef MONO_ARCH_VTABLE_REG
10413 GENERIC_SHARING_FAILURE (op);
10416 context_used = mini_class_check_context_used (cfg, klass);
10418 ftype = mono_field_get_type (field);
10420 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10423 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10424 * to be called here.
10426 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10427 mono_class_vtable (cfg->domain, klass);
10428 CHECK_TYPELOAD (klass);
10430 mono_domain_lock (cfg->domain);
10431 if (cfg->domain->special_static_fields)
10432 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10433 mono_domain_unlock (cfg->domain);
10435 is_special_static = mono_class_field_is_special_static (field);
10437 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10438 thread_ins = mono_get_thread_intrinsic (cfg);
10442 /* Generate IR to compute the field address */
10443 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10445 * Fast access to TLS data
10446 * Inline version of get_thread_static_data () in
10450 int idx, static_data_reg, array_reg, dreg;
10452 GSHAREDVT_FAILURE (op);
10454 // offset &= 0x7fffffff;
10455 // idx = (offset >> 24) - 1;
10456 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10457 MONO_ADD_INS (cfg->cbb, thread_ins);
10458 static_data_reg = alloc_ireg (cfg);
10459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10461 if (cfg->compile_aot) {
10462 int offset_reg, offset2_reg, idx_reg;
10464 /* For TLS variables, this will return the TLS offset */
10465 EMIT_NEW_SFLDACONST (cfg, ins, field);
10466 offset_reg = ins->dreg;
10467 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10468 idx_reg = alloc_ireg (cfg);
10469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10470 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10472 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10473 array_reg = alloc_ireg (cfg);
10474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10475 offset2_reg = alloc_ireg (cfg);
10476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10477 dreg = alloc_ireg (cfg);
10478 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10480 offset = (gsize)addr & 0x7fffffff;
10481 idx = (offset >> 24) - 1;
10483 array_reg = alloc_ireg (cfg);
10484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10485 dreg = alloc_ireg (cfg);
10486 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10488 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10489 (cfg->compile_aot && is_special_static) ||
10490 (context_used && is_special_static)) {
10491 MonoInst *iargs [2];
10493 g_assert (field->parent);
10494 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10495 if (context_used) {
10496 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10497 field, MONO_RGCTX_INFO_CLASS_FIELD);
10499 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10501 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10502 } else if (context_used) {
10503 MonoInst *static_data;
10506 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10507 method->klass->name_space, method->klass->name, method->name,
10508 depth, field->offset);
10511 if (mono_class_needs_cctor_run (klass, method))
10512 emit_generic_class_init (cfg, klass);
10515 * The pointer we're computing here is
10517 * super_info.static_data + field->offset
10519 static_data = emit_get_rgctx_klass (cfg, context_used,
10520 klass, MONO_RGCTX_INFO_STATIC_DATA);
10522 if (mini_is_gsharedvt_klass (cfg, klass)) {
10523 MonoInst *offset_ins;
10525 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10526 dreg = alloc_ireg_mp (cfg);
10527 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10528 } else if (field->offset == 0) {
10531 int addr_reg = mono_alloc_preg (cfg);
10532 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10534 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10535 MonoInst *iargs [2];
10537 g_assert (field->parent);
10538 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10539 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10540 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10542 MonoVTable *vtable = NULL;
10544 if (!cfg->compile_aot)
10545 vtable = mono_class_vtable (cfg->domain, klass);
10546 CHECK_TYPELOAD (klass);
10549 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10550 if (!(g_slist_find (class_inits, klass))) {
10551 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10552 if (cfg->verbose_level > 2)
10553 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10554 class_inits = g_slist_prepend (class_inits, klass);
10557 if (cfg->run_cctors) {
10559 /* This makes so that inline cannot trigger */
10560 /* .cctors: too many apps depend on them */
10561 /* running with a specific order... */
10563 if (! vtable->initialized)
10564 INLINE_FAILURE ("class init");
10565 ex = mono_runtime_class_init_full (vtable, FALSE);
10567 set_exception_object (cfg, ex);
10568 goto exception_exit;
10572 if (cfg->compile_aot)
10573 EMIT_NEW_SFLDACONST (cfg, ins, field);
10576 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10578 EMIT_NEW_PCONST (cfg, ins, addr);
10581 MonoInst *iargs [1];
10582 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10583 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10587 /* Generate IR to do the actual load/store operation */
10589 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10590 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10591 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore*/
10592 emit_memory_barrier (cfg, FullBarrier);
10595 if (op == CEE_LDSFLDA) {
10596 ins->klass = mono_class_from_mono_type (ftype);
10597 ins->type = STACK_PTR;
10599 } else if (op == CEE_STSFLD) {
10602 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10603 store->flags |= ins_flag;
10605 gboolean is_const = FALSE;
10606 MonoVTable *vtable = NULL;
10607 gpointer addr = NULL;
10609 if (!context_used) {
10610 vtable = mono_class_vtable (cfg->domain, klass);
10611 CHECK_TYPELOAD (klass);
10613 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10614 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10615 int ro_type = ftype->type;
10617 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10618 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10619 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10622 GSHAREDVT_FAILURE (op);
10624 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10627 case MONO_TYPE_BOOLEAN:
10629 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10633 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10636 case MONO_TYPE_CHAR:
10638 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10642 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10647 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10651 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10656 case MONO_TYPE_PTR:
10657 case MONO_TYPE_FNPTR:
10658 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10659 type_to_eval_stack_type ((cfg), field->type, *sp);
10662 case MONO_TYPE_STRING:
10663 case MONO_TYPE_OBJECT:
10664 case MONO_TYPE_CLASS:
10665 case MONO_TYPE_SZARRAY:
10666 case MONO_TYPE_ARRAY:
10667 if (!mono_gc_is_moving ()) {
10668 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10669 type_to_eval_stack_type ((cfg), field->type, *sp);
10677 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10682 case MONO_TYPE_VALUETYPE:
10692 CHECK_STACK_OVF (1);
10694 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10695 load->flags |= ins_flag;
10701 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10702 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10703 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10704 emit_memory_barrier (cfg, FullBarrier);
10715 token = read32 (ip + 1);
10716 klass = mini_get_class (method, token, generic_context);
10717 CHECK_TYPELOAD (klass);
10718 if (ins_flag & MONO_INST_VOLATILE) {
10719 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10720 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore*/
10721 emit_memory_barrier (cfg, FullBarrier);
10723 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10724 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10725 ins->flags |= ins_flag;
10726 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10727 generic_class_is_reference_type (cfg, klass)) {
10728 /* insert call to write barrier */
10729 emit_write_barrier (cfg, sp [0], sp [1]);
10741 const char *data_ptr;
10743 guint32 field_token;
10749 token = read32 (ip + 1);
10751 klass = mini_get_class (method, token, generic_context);
10752 CHECK_TYPELOAD (klass);
10754 context_used = mini_class_check_context_used (cfg, klass);
10756 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10757 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10758 ins->sreg1 = sp [0]->dreg;
10759 ins->type = STACK_I4;
10760 ins->dreg = alloc_ireg (cfg);
10761 MONO_ADD_INS (cfg->cbb, ins);
10762 *sp = mono_decompose_opcode (cfg, ins);
10765 if (context_used) {
10766 MonoInst *args [3];
10767 MonoClass *array_class = mono_array_class_get (klass, 1);
10768 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10770 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10773 args [0] = emit_get_rgctx_klass (cfg, context_used,
10774 array_class, MONO_RGCTX_INFO_VTABLE);
10779 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10781 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10783 if (cfg->opt & MONO_OPT_SHARED) {
10784 /* Decompose now to avoid problems with references to the domainvar */
10785 MonoInst *iargs [3];
10787 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10788 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10789 iargs [2] = sp [0];
10791 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10793 /* Decompose later since it is needed by abcrem */
10794 MonoClass *array_type = mono_array_class_get (klass, 1);
10795 mono_class_vtable (cfg->domain, array_type);
10796 CHECK_TYPELOAD (array_type);
10798 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10799 ins->dreg = alloc_ireg_ref (cfg);
10800 ins->sreg1 = sp [0]->dreg;
10801 ins->inst_newa_class = klass;
10802 ins->type = STACK_OBJ;
10803 ins->klass = array_type;
10804 MONO_ADD_INS (cfg->cbb, ins);
10805 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10806 cfg->cbb->has_array_access = TRUE;
10808 /* Needed so mono_emit_load_get_addr () gets called */
10809 mono_get_got_var (cfg);
10819 * we inline/optimize the initialization sequence if possible.
10820 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10821 * for small sizes open code the memcpy
10822 * ensure the rva field is big enough
10824 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10825 MonoMethod *memcpy_method = get_memcpy_method ();
10826 MonoInst *iargs [3];
10827 int add_reg = alloc_ireg_mp (cfg);
10829 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10830 if (cfg->compile_aot) {
10831 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10833 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10835 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10836 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10845 if (sp [0]->type != STACK_OBJ)
10848 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10849 ins->dreg = alloc_preg (cfg);
10850 ins->sreg1 = sp [0]->dreg;
10851 ins->type = STACK_I4;
10852 /* This flag will be inherited by the decomposition */
10853 ins->flags |= MONO_INST_FAULT;
10854 MONO_ADD_INS (cfg->cbb, ins);
10855 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10856 cfg->cbb->has_array_access = TRUE;
10864 if (sp [0]->type != STACK_OBJ)
10867 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10869 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10870 CHECK_TYPELOAD (klass);
10871 /* we need to make sure that this array is exactly the type it needs
10872 * to be for correctness. the wrappers are lax with their usage
10873 * so we need to ignore them here
10875 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10876 MonoClass *array_class = mono_array_class_get (klass, 1);
10877 mini_emit_check_array_type (cfg, sp [0], array_class);
10878 CHECK_TYPELOAD (array_class);
10882 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10887 case CEE_LDELEM_I1:
10888 case CEE_LDELEM_U1:
10889 case CEE_LDELEM_I2:
10890 case CEE_LDELEM_U2:
10891 case CEE_LDELEM_I4:
10892 case CEE_LDELEM_U4:
10893 case CEE_LDELEM_I8:
10895 case CEE_LDELEM_R4:
10896 case CEE_LDELEM_R8:
10897 case CEE_LDELEM_REF: {
10903 if (*ip == CEE_LDELEM) {
10905 token = read32 (ip + 1);
10906 klass = mini_get_class (method, token, generic_context);
10907 CHECK_TYPELOAD (klass);
10908 mono_class_init (klass);
10911 klass = array_access_to_klass (*ip);
10913 if (sp [0]->type != STACK_OBJ)
10916 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10918 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10919 // FIXME-VT: OP_ICONST optimization
10920 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10921 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10922 ins->opcode = OP_LOADV_MEMBASE;
10923 } else if (sp [1]->opcode == OP_ICONST) {
10924 int array_reg = sp [0]->dreg;
10925 int index_reg = sp [1]->dreg;
10926 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10928 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10929 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10931 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10932 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10935 if (*ip == CEE_LDELEM)
10942 case CEE_STELEM_I1:
10943 case CEE_STELEM_I2:
10944 case CEE_STELEM_I4:
10945 case CEE_STELEM_I8:
10946 case CEE_STELEM_R4:
10947 case CEE_STELEM_R8:
10948 case CEE_STELEM_REF:
10953 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10955 if (*ip == CEE_STELEM) {
10957 token = read32 (ip + 1);
10958 klass = mini_get_class (method, token, generic_context);
10959 CHECK_TYPELOAD (klass);
10960 mono_class_init (klass);
10963 klass = array_access_to_klass (*ip);
10965 if (sp [0]->type != STACK_OBJ)
10968 emit_array_store (cfg, klass, sp, TRUE);
10970 if (*ip == CEE_STELEM)
10977 case CEE_CKFINITE: {
10981 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10982 ins->sreg1 = sp [0]->dreg;
10983 ins->dreg = alloc_freg (cfg);
10984 ins->type = STACK_R8;
10985 MONO_ADD_INS (bblock, ins);
10987 *sp++ = mono_decompose_opcode (cfg, ins);
10992 case CEE_REFANYVAL: {
10993 MonoInst *src_var, *src;
10995 int klass_reg = alloc_preg (cfg);
10996 int dreg = alloc_preg (cfg);
10998 GSHAREDVT_FAILURE (*ip);
11001 MONO_INST_NEW (cfg, ins, *ip);
11004 klass = mono_class_get_and_inflate_typespec_checked (image, read32 (ip + 1), generic_context, &error);
11005 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11006 CHECK_TYPELOAD (klass);
11007 mono_class_init (klass);
11009 context_used = mini_class_check_context_used (cfg, klass);
11012 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11014 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11015 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11016 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11018 if (context_used) {
11019 MonoInst *klass_ins;
11021 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11022 klass, MONO_RGCTX_INFO_KLASS);
11025 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11026 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11028 mini_emit_class_check (cfg, klass_reg, klass);
11030 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11031 ins->type = STACK_MP;
11036 case CEE_MKREFANY: {
11037 MonoInst *loc, *addr;
11039 GSHAREDVT_FAILURE (*ip);
11042 MONO_INST_NEW (cfg, ins, *ip);
11045 klass = mono_class_get_and_inflate_typespec_checked (image, read32 (ip + 1), generic_context, &error);
11046 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11047 CHECK_TYPELOAD (klass);
11048 mono_class_init (klass);
11050 context_used = mini_class_check_context_used (cfg, klass);
11052 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11053 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11055 if (context_used) {
11056 MonoInst *const_ins;
11057 int type_reg = alloc_preg (cfg);
11059 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11060 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11062 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11063 } else if (cfg->compile_aot) {
11064 int const_reg = alloc_preg (cfg);
11065 int type_reg = alloc_preg (cfg);
11067 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11072 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11073 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11077 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11078 ins->type = STACK_VTYPE;
11079 ins->klass = mono_defaults.typed_reference_class;
11084 case CEE_LDTOKEN: {
11086 MonoClass *handle_class;
11088 CHECK_STACK_OVF (1);
11091 n = read32 (ip + 1);
11093 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11094 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11095 handle = mono_method_get_wrapper_data (method, n);
11096 handle_class = mono_method_get_wrapper_data (method, n + 1);
11097 if (handle_class == mono_defaults.typehandle_class)
11098 handle = &((MonoClass*)handle)->byval_arg;
11101 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11105 mono_class_init (handle_class);
11106 if (cfg->generic_sharing_context) {
11107 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11108 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11109 /* This case handles ldtoken
11110 of an open type, like for
11113 } else if (handle_class == mono_defaults.typehandle_class) {
11114 /* If we get a MONO_TYPE_CLASS
11115 then we need to provide the
11117 instantiation of it. */
11118 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
11121 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11122 } else if (handle_class == mono_defaults.fieldhandle_class)
11123 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11124 else if (handle_class == mono_defaults.methodhandle_class)
11125 context_used = mini_method_check_context_used (cfg, handle);
11127 g_assert_not_reached ();
11130 if ((cfg->opt & MONO_OPT_SHARED) &&
11131 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11132 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11133 MonoInst *addr, *vtvar, *iargs [3];
11134 int method_context_used;
11136 method_context_used = mini_method_check_context_used (cfg, method);
11138 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11140 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11141 EMIT_NEW_ICONST (cfg, iargs [1], n);
11142 if (method_context_used) {
11143 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11144 method, MONO_RGCTX_INFO_METHOD);
11145 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11147 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11148 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11150 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11154 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11156 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11157 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11158 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11159 (cmethod->klass == mono_defaults.systemtype_class) &&
11160 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11161 MonoClass *tclass = mono_class_from_mono_type (handle);
11163 mono_class_init (tclass);
11164 if (context_used) {
11165 ins = emit_get_rgctx_klass (cfg, context_used,
11166 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11167 } else if (cfg->compile_aot) {
11168 if (method->wrapper_type) {
11169 mono_error_init (&error); //got to do it since there are multiple conditionals below
11170 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11171 /* Special case for static synchronized wrappers */
11172 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11174 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11175 /* FIXME: n is not a normal token */
11177 EMIT_NEW_PCONST (cfg, ins, NULL);
11180 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11183 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11185 ins->type = STACK_OBJ;
11186 ins->klass = cmethod->klass;
11189 MonoInst *addr, *vtvar;
11191 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11193 if (context_used) {
11194 if (handle_class == mono_defaults.typehandle_class) {
11195 ins = emit_get_rgctx_klass (cfg, context_used,
11196 mono_class_from_mono_type (handle),
11197 MONO_RGCTX_INFO_TYPE);
11198 } else if (handle_class == mono_defaults.methodhandle_class) {
11199 ins = emit_get_rgctx_method (cfg, context_used,
11200 handle, MONO_RGCTX_INFO_METHOD);
11201 } else if (handle_class == mono_defaults.fieldhandle_class) {
11202 ins = emit_get_rgctx_field (cfg, context_used,
11203 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11205 g_assert_not_reached ();
11207 } else if (cfg->compile_aot) {
11208 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11210 EMIT_NEW_PCONST (cfg, ins, handle);
11212 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11213 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11214 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11224 MONO_INST_NEW (cfg, ins, OP_THROW);
11226 ins->sreg1 = sp [0]->dreg;
11228 bblock->out_of_line = TRUE;
11229 MONO_ADD_INS (bblock, ins);
11230 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11231 MONO_ADD_INS (bblock, ins);
11234 link_bblock (cfg, bblock, end_bblock);
11235 start_new_bblock = 1;
11237 case CEE_ENDFINALLY:
11238 /* mono_save_seq_point_info () depends on this */
11239 if (sp != stack_start)
11240 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11241 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11242 MONO_ADD_INS (bblock, ins);
11244 start_new_bblock = 1;
11247 * Control will leave the method so empty the stack, otherwise
11248 * the next basic block will start with a nonempty stack.
11250 while (sp != stack_start) {
11255 case CEE_LEAVE_S: {
11258 if (*ip == CEE_LEAVE) {
11260 target = ip + 5 + (gint32)read32(ip + 1);
11263 target = ip + 2 + (signed char)(ip [1]);
11266 /* empty the stack */
11267 while (sp != stack_start) {
11272 * If this leave statement is in a catch block, check for a
11273 * pending exception, and rethrow it if necessary.
11274 * We avoid doing this in runtime invoke wrappers, since those are called
11275 * by native code which expects the wrapper to catch all exceptions.
11277 for (i = 0; i < header->num_clauses; ++i) {
11278 MonoExceptionClause *clause = &header->clauses [i];
11281 * Use <= in the final comparison to handle clauses with multiple
11282 * leave statements, like in bug #78024.
11283 * The ordering of the exception clauses guarantees that we find the
11284 * innermost clause.
11286 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11288 MonoBasicBlock *dont_throw;
11293 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11296 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11298 NEW_BBLOCK (cfg, dont_throw);
11301 * Currently, we always rethrow the abort exception, despite the
11302 * fact that this is not correct. See thread6.cs for an example.
11303 * But propagating the abort exception is more important than
11304 * getting the sematics right.
11306 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11307 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11308 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11310 MONO_START_BB (cfg, dont_throw);
11315 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11317 MonoExceptionClause *clause;
11319 for (tmp = handlers; tmp; tmp = tmp->next) {
11320 clause = tmp->data;
11321 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11323 link_bblock (cfg, bblock, tblock);
11324 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11325 ins->inst_target_bb = tblock;
11326 ins->inst_eh_block = clause;
11327 MONO_ADD_INS (bblock, ins);
11328 bblock->has_call_handler = 1;
11329 if (COMPILE_LLVM (cfg)) {
11330 MonoBasicBlock *target_bb;
11333 * Link the finally bblock with the target, since it will
11334 * conceptually branch there.
11335 * FIXME: Have to link the bblock containing the endfinally.
11337 GET_BBLOCK (cfg, target_bb, target);
11338 link_bblock (cfg, tblock, target_bb);
11341 g_list_free (handlers);
11344 MONO_INST_NEW (cfg, ins, OP_BR);
11345 MONO_ADD_INS (bblock, ins);
11346 GET_BBLOCK (cfg, tblock, target);
11347 link_bblock (cfg, bblock, tblock);
11348 ins->inst_target_bb = tblock;
11349 start_new_bblock = 1;
11351 if (*ip == CEE_LEAVE)
11360 * Mono specific opcodes
11362 case MONO_CUSTOM_PREFIX: {
11364 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11368 case CEE_MONO_ICALL: {
11370 MonoJitICallInfo *info;
11372 token = read32 (ip + 2);
11373 func = mono_method_get_wrapper_data (method, token);
11374 info = mono_find_jit_icall_by_addr (func);
11376 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11379 CHECK_STACK (info->sig->param_count);
11380 sp -= info->sig->param_count;
11382 ins = mono_emit_jit_icall (cfg, info->func, sp);
11383 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11387 inline_costs += 10 * num_calls++;
11391 case CEE_MONO_LDPTR: {
11394 CHECK_STACK_OVF (1);
11396 token = read32 (ip + 2);
11398 ptr = mono_method_get_wrapper_data (method, token);
11399 /* FIXME: Generalize this */
11400 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11401 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11406 EMIT_NEW_PCONST (cfg, ins, ptr);
11409 inline_costs += 10 * num_calls++;
11410 /* Can't embed random pointers into AOT code */
11414 case CEE_MONO_JIT_ICALL_ADDR: {
11415 MonoJitICallInfo *callinfo;
11418 CHECK_STACK_OVF (1);
11420 token = read32 (ip + 2);
11422 ptr = mono_method_get_wrapper_data (method, token);
11423 callinfo = mono_find_jit_icall_by_addr (ptr);
11424 g_assert (callinfo);
11425 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11428 inline_costs += 10 * num_calls++;
11431 case CEE_MONO_ICALL_ADDR: {
11432 MonoMethod *cmethod;
11435 CHECK_STACK_OVF (1);
11437 token = read32 (ip + 2);
11439 cmethod = mono_method_get_wrapper_data (method, token);
11441 if (cfg->compile_aot) {
11442 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11444 ptr = mono_lookup_internal_call (cmethod);
11446 EMIT_NEW_PCONST (cfg, ins, ptr);
11452 case CEE_MONO_VTADDR: {
11453 MonoInst *src_var, *src;
11459 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11460 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11465 case CEE_MONO_NEWOBJ: {
11466 MonoInst *iargs [2];
11468 CHECK_STACK_OVF (1);
11470 token = read32 (ip + 2);
11471 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11472 mono_class_init (klass);
11473 NEW_DOMAINCONST (cfg, iargs [0]);
11474 MONO_ADD_INS (cfg->cbb, iargs [0]);
11475 NEW_CLASSCONST (cfg, iargs [1], klass);
11476 MONO_ADD_INS (cfg->cbb, iargs [1]);
11477 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11479 inline_costs += 10 * num_calls++;
11482 case CEE_MONO_OBJADDR:
11485 MONO_INST_NEW (cfg, ins, OP_MOVE);
11486 ins->dreg = alloc_ireg_mp (cfg);
11487 ins->sreg1 = sp [0]->dreg;
11488 ins->type = STACK_MP;
11489 MONO_ADD_INS (cfg->cbb, ins);
11493 case CEE_MONO_LDNATIVEOBJ:
11495 * Similar to LDOBJ, but instead load the unmanaged
11496 * representation of the vtype to the stack.
11501 token = read32 (ip + 2);
11502 klass = mono_method_get_wrapper_data (method, token);
11503 g_assert (klass->valuetype);
11504 mono_class_init (klass);
11507 MonoInst *src, *dest, *temp;
11510 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11511 temp->backend.is_pinvoke = 1;
11512 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11513 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11515 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11516 dest->type = STACK_VTYPE;
11517 dest->klass = klass;
11523 case CEE_MONO_RETOBJ: {
11525 * Same as RET, but return the native representation of a vtype
11528 g_assert (cfg->ret);
11529 g_assert (mono_method_signature (method)->pinvoke);
11534 token = read32 (ip + 2);
11535 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11537 if (!cfg->vret_addr) {
11538 g_assert (cfg->ret_var_is_local);
11540 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11542 EMIT_NEW_RETLOADA (cfg, ins);
11544 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11546 if (sp != stack_start)
11549 MONO_INST_NEW (cfg, ins, OP_BR);
11550 ins->inst_target_bb = end_bblock;
11551 MONO_ADD_INS (bblock, ins);
11552 link_bblock (cfg, bblock, end_bblock);
11553 start_new_bblock = 1;
11557 case CEE_MONO_CISINST:
11558 case CEE_MONO_CCASTCLASS: {
11563 token = read32 (ip + 2);
11564 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11565 if (ip [1] == CEE_MONO_CISINST)
11566 ins = handle_cisinst (cfg, klass, sp [0]);
11568 ins = handle_ccastclass (cfg, klass, sp [0]);
11574 case CEE_MONO_SAVE_LMF:
11575 case CEE_MONO_RESTORE_LMF:
11576 #ifdef MONO_ARCH_HAVE_LMF_OPS
11577 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11578 MONO_ADD_INS (bblock, ins);
11579 cfg->need_lmf_area = TRUE;
11583 case CEE_MONO_CLASSCONST:
11584 CHECK_STACK_OVF (1);
11586 token = read32 (ip + 2);
11587 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11590 inline_costs += 10 * num_calls++;
11592 case CEE_MONO_NOT_TAKEN:
11593 bblock->out_of_line = TRUE;
11596 case CEE_MONO_TLS: {
11599 CHECK_STACK_OVF (1);
11601 key = (gint32)read32 (ip + 2);
11602 g_assert (key < TLS_KEY_NUM);
11604 ins = mono_create_tls_get (cfg, key);
11606 if (cfg->compile_aot) {
11608 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11609 ins->dreg = alloc_preg (cfg);
11610 ins->type = STACK_PTR;
11612 g_assert_not_reached ();
11615 ins->type = STACK_PTR;
11616 MONO_ADD_INS (bblock, ins);
11621 case CEE_MONO_DYN_CALL: {
11622 MonoCallInst *call;
11624 /* It would be easier to call a trampoline, but that would put an
11625 * extra frame on the stack, confusing exception handling. So
11626 * implement it inline using an opcode for now.
11629 if (!cfg->dyn_call_var) {
11630 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11631 /* prevent it from being register allocated */
11632 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11635 /* Has to use a call inst since it local regalloc expects it */
11636 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11637 ins = (MonoInst*)call;
11639 ins->sreg1 = sp [0]->dreg;
11640 ins->sreg2 = sp [1]->dreg;
11641 MONO_ADD_INS (bblock, ins);
11643 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11646 inline_costs += 10 * num_calls++;
11650 case CEE_MONO_MEMORY_BARRIER: {
11652 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11656 case CEE_MONO_JIT_ATTACH: {
11657 MonoInst *args [16], *domain_ins;
11658 MonoInst *ad_ins, *jit_tls_ins;
11659 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11661 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11663 EMIT_NEW_PCONST (cfg, ins, NULL);
11664 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11666 ad_ins = mono_get_domain_intrinsic (cfg);
11667 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11669 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11670 NEW_BBLOCK (cfg, next_bb);
11671 NEW_BBLOCK (cfg, call_bb);
11673 if (cfg->compile_aot) {
11674 /* AOT code is only used in the root domain */
11675 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11677 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11679 MONO_ADD_INS (cfg->cbb, ad_ins);
11680 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11683 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11688 MONO_START_BB (cfg, call_bb);
11691 if (cfg->compile_aot) {
11692 /* AOT code is only used in the root domain */
11693 EMIT_NEW_PCONST (cfg, args [0], NULL);
11695 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11697 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11698 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11701 MONO_START_BB (cfg, next_bb);
11707 case CEE_MONO_JIT_DETACH: {
11708 MonoInst *args [16];
11710 /* Restore the original domain */
11711 dreg = alloc_ireg (cfg);
11712 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11713 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11718 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11724 case CEE_PREFIX1: {
11727 case CEE_ARGLIST: {
11728 /* somewhat similar to LDTOKEN */
11729 MonoInst *addr, *vtvar;
11730 CHECK_STACK_OVF (1);
11731 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11733 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11734 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11736 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11737 ins->type = STACK_VTYPE;
11738 ins->klass = mono_defaults.argumenthandle_class;
11751 * The following transforms:
11752 * CEE_CEQ into OP_CEQ
11753 * CEE_CGT into OP_CGT
11754 * CEE_CGT_UN into OP_CGT_UN
11755 * CEE_CLT into OP_CLT
11756 * CEE_CLT_UN into OP_CLT_UN
11758 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11760 MONO_INST_NEW (cfg, ins, cmp->opcode);
11762 cmp->sreg1 = sp [0]->dreg;
11763 cmp->sreg2 = sp [1]->dreg;
11764 type_from_op (cmp, sp [0], sp [1]);
11766 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11767 cmp->opcode = OP_LCOMPARE;
11768 else if (sp [0]->type == STACK_R8)
11769 cmp->opcode = OP_FCOMPARE;
11771 cmp->opcode = OP_ICOMPARE;
11772 MONO_ADD_INS (bblock, cmp);
11773 ins->type = STACK_I4;
11774 ins->dreg = alloc_dreg (cfg, ins->type);
11775 type_from_op (ins, sp [0], sp [1]);
11777 if (cmp->opcode == OP_FCOMPARE) {
11779 * The backends expect the fceq opcodes to do the
11782 ins->sreg1 = cmp->sreg1;
11783 ins->sreg2 = cmp->sreg2;
11786 MONO_ADD_INS (bblock, ins);
11792 MonoInst *argconst;
11793 MonoMethod *cil_method;
11795 CHECK_STACK_OVF (1);
11797 n = read32 (ip + 2);
11798 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11799 if (!cmethod || mono_loader_get_last_error ())
11801 mono_class_init (cmethod->klass);
11803 mono_save_token_info (cfg, image, n, cmethod);
11805 context_used = mini_method_check_context_used (cfg, cmethod);
11807 cil_method = cmethod;
11808 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11809 METHOD_ACCESS_FAILURE (method, cil_method);
11811 if (mono_security_cas_enabled ()) {
11812 if (check_linkdemand (cfg, method, cmethod))
11813 INLINE_FAILURE ("linkdemand");
11814 CHECK_CFG_EXCEPTION;
11815 } else if (mono_security_core_clr_enabled ()) {
11816 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11820 * Optimize the common case of ldftn+delegate creation
11822 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11823 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11824 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11825 MonoInst *target_ins, *handle_ins;
11826 MonoMethod *invoke;
11827 int invoke_context_used;
11829 invoke = mono_get_delegate_invoke (ctor_method->klass);
11830 if (!invoke || !mono_method_signature (invoke))
11833 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11835 target_ins = sp [-1];
11837 if (mono_security_core_clr_enabled ())
11838 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11840 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11841 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11842 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11844 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11848 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11849 /* FIXME: SGEN support */
11850 if (invoke_context_used == 0) {
11852 if (cfg->verbose_level > 3)
11853 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11854 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11857 CHECK_CFG_EXCEPTION;
11868 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11869 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11873 inline_costs += 10 * num_calls++;
11876 case CEE_LDVIRTFTN: {
11877 MonoInst *args [2];
11881 n = read32 (ip + 2);
11882 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11883 if (!cmethod || mono_loader_get_last_error ())
11885 mono_class_init (cmethod->klass);
11887 context_used = mini_method_check_context_used (cfg, cmethod);
11889 if (mono_security_cas_enabled ()) {
11890 if (check_linkdemand (cfg, method, cmethod))
11891 INLINE_FAILURE ("linkdemand");
11892 CHECK_CFG_EXCEPTION;
11893 } else if (mono_security_core_clr_enabled ()) {
11894 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11898 * Optimize the common case of ldvirtftn+delegate creation
11900 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11901 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11902 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11903 MonoInst *target_ins, *handle_ins;
11904 MonoMethod *invoke;
11905 int invoke_context_used;
11907 invoke = mono_get_delegate_invoke (ctor_method->klass);
11908 if (!invoke || !mono_method_signature (invoke))
11911 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11913 target_ins = sp [-1];
11915 if (mono_security_core_clr_enabled ())
11916 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11918 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11919 /* FIXME: SGEN support */
11920 if (invoke_context_used == 0) {
11922 if (cfg->verbose_level > 3)
11923 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11924 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11927 CHECK_CFG_EXCEPTION;
11941 args [1] = emit_get_rgctx_method (cfg, context_used,
11942 cmethod, MONO_RGCTX_INFO_METHOD);
11945 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11947 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11950 inline_costs += 10 * num_calls++;
11954 CHECK_STACK_OVF (1);
11956 n = read16 (ip + 2);
11958 EMIT_NEW_ARGLOAD (cfg, ins, n);
11963 CHECK_STACK_OVF (1);
11965 n = read16 (ip + 2);
11967 NEW_ARGLOADA (cfg, ins, n);
11968 MONO_ADD_INS (cfg->cbb, ins);
11976 n = read16 (ip + 2);
11978 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11980 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11984 CHECK_STACK_OVF (1);
11986 n = read16 (ip + 2);
11988 EMIT_NEW_LOCLOAD (cfg, ins, n);
11993 unsigned char *tmp_ip;
11994 CHECK_STACK_OVF (1);
11996 n = read16 (ip + 2);
11999 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12005 EMIT_NEW_LOCLOADA (cfg, ins, n);
12014 n = read16 (ip + 2);
12016 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12018 emit_stloc_ir (cfg, sp, header, n);
12025 if (sp != stack_start)
12027 if (cfg->method != method)
12029 * Inlining this into a loop in a parent could lead to
12030 * stack overflows which is different behavior than the
12031 * non-inlined case, thus disable inlining in this case.
12033 INLINE_FAILURE("localloc");
12035 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12036 ins->dreg = alloc_preg (cfg);
12037 ins->sreg1 = sp [0]->dreg;
12038 ins->type = STACK_PTR;
12039 MONO_ADD_INS (cfg->cbb, ins);
12041 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12043 ins->flags |= MONO_INST_INIT;
12048 case CEE_ENDFILTER: {
12049 MonoExceptionClause *clause, *nearest;
12050 int cc, nearest_num;
12054 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12056 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12057 ins->sreg1 = (*sp)->dreg;
12058 MONO_ADD_INS (bblock, ins);
12059 start_new_bblock = 1;
12064 for (cc = 0; cc < header->num_clauses; ++cc) {
12065 clause = &header->clauses [cc];
12066 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12067 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12068 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12073 g_assert (nearest);
12074 if ((ip - header->code) != nearest->handler_offset)
12079 case CEE_UNALIGNED_:
12080 ins_flag |= MONO_INST_UNALIGNED;
12081 /* FIXME: record alignment? we can assume 1 for now */
12085 case CEE_VOLATILE_:
12086 ins_flag |= MONO_INST_VOLATILE;
12090 ins_flag |= MONO_INST_TAILCALL;
12091 cfg->flags |= MONO_CFG_HAS_TAIL;
12092 /* Can't inline tail calls at this time */
12093 inline_costs += 100000;
12100 token = read32 (ip + 2);
12101 klass = mini_get_class (method, token, generic_context);
12102 CHECK_TYPELOAD (klass);
12103 if (generic_class_is_reference_type (cfg, klass))
12104 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12106 mini_emit_initobj (cfg, *sp, NULL, klass);
12110 case CEE_CONSTRAINED_:
12112 token = read32 (ip + 2);
12113 constrained_call = mini_get_class (method, token, generic_context);
12114 CHECK_TYPELOAD (constrained_call);
12118 case CEE_INITBLK: {
12119 MonoInst *iargs [3];
12123 /* Skip optimized paths for volatile operations. */
12124 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12125 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12126 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12127 /* emit_memset only works when val == 0 */
12128 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12131 iargs [0] = sp [0];
12132 iargs [1] = sp [1];
12133 iargs [2] = sp [2];
12134 if (ip [1] == CEE_CPBLK) {
12136 * FIXME: It's unclear whether we should be emitting both the acquire
12137 * and release barriers for cpblk. It is technically both a load and
12138 * store operation, so it seems like that's the sensible thing to do.
12140 MonoMethod *memcpy_method = get_memcpy_method ();
12141 if (ins_flag & MONO_INST_VOLATILE) {
12142 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12143 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12144 emit_memory_barrier (cfg, FullBarrier);
12146 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12147 call->flags |= ins_flag;
12148 if (ins_flag & MONO_INST_VOLATILE) {
12149 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12150 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12151 emit_memory_barrier (cfg, FullBarrier);
12154 MonoMethod *memset_method = get_memset_method ();
12155 if (ins_flag & MONO_INST_VOLATILE) {
12156 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12157 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12158 emit_memory_barrier (cfg, FullBarrier);
12160 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12161 call->flags |= ins_flag;
12172 ins_flag |= MONO_INST_NOTYPECHECK;
12174 ins_flag |= MONO_INST_NORANGECHECK;
12175 /* we ignore the no-nullcheck for now since we
12176 * really do it explicitly only when doing callvirt->call
12180 case CEE_RETHROW: {
12182 int handler_offset = -1;
12184 for (i = 0; i < header->num_clauses; ++i) {
12185 MonoExceptionClause *clause = &header->clauses [i];
12186 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12187 handler_offset = clause->handler_offset;
12192 bblock->flags |= BB_EXCEPTION_UNSAFE;
12194 if (handler_offset == -1)
12197 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12198 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12199 ins->sreg1 = load->dreg;
12200 MONO_ADD_INS (bblock, ins);
12202 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12203 MONO_ADD_INS (bblock, ins);
12206 link_bblock (cfg, bblock, end_bblock);
12207 start_new_bblock = 1;
12215 CHECK_STACK_OVF (1);
12217 token = read32 (ip + 2);
12218 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12219 MonoType *type = mono_type_create_from_typespec_checked (image, token, &error);
12220 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12224 val = mono_type_size (type, &ialign);
12226 MonoClass *klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, &error);
12227 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12228 CHECK_TYPELOAD (klass);
12229 mono_class_init (klass);
12230 val = mono_type_size (&klass->byval_arg, &ialign);
12232 if (mini_is_gsharedvt_klass (cfg, klass))
12233 GSHAREDVT_FAILURE (*ip);
12235 EMIT_NEW_ICONST (cfg, ins, val);
12240 case CEE_REFANYTYPE: {
12241 MonoInst *src_var, *src;
12243 GSHAREDVT_FAILURE (*ip);
12249 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12251 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12252 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12253 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12258 case CEE_READONLY_:
12271 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12281 g_warning ("opcode 0x%02x not handled", *ip);
12285 if (start_new_bblock != 1)
12288 bblock->cil_length = ip - bblock->cil_code;
12289 if (bblock->next_bb) {
12290 /* This could already be set because of inlining, #693905 */
12291 MonoBasicBlock *bb = bblock;
12293 while (bb->next_bb)
12295 bb->next_bb = end_bblock;
12297 bblock->next_bb = end_bblock;
12300 if (cfg->method == method && cfg->domainvar) {
12302 MonoInst *get_domain;
12304 cfg->cbb = init_localsbb;
12306 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12307 MONO_ADD_INS (cfg->cbb, get_domain);
12309 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12311 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12312 MONO_ADD_INS (cfg->cbb, store);
12315 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12316 if (cfg->compile_aot)
12317 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12318 mono_get_got_var (cfg);
12321 if (cfg->method == method && cfg->got_var)
12322 mono_emit_load_got_addr (cfg);
12324 if (init_localsbb) {
12325 cfg->cbb = init_localsbb;
12327 for (i = 0; i < header->num_locals; ++i) {
12328 emit_init_local (cfg, i, header->locals [i], init_locals);
12332 if (cfg->init_ref_vars && cfg->method == method) {
12333 /* Emit initialization for ref vars */
12334 // FIXME: Avoid duplication initialization for IL locals.
12335 for (i = 0; i < cfg->num_varinfo; ++i) {
12336 MonoInst *ins = cfg->varinfo [i];
12338 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12339 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12343 if (cfg->lmf_var && cfg->method == method) {
12344 cfg->cbb = init_localsbb;
12345 emit_push_lmf (cfg);
12348 cfg->cbb = init_localsbb;
12349 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12352 MonoBasicBlock *bb;
12355 * Make seq points at backward branch targets interruptable.
12357 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12358 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12359 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12362 /* Add a sequence point for method entry/exit events */
12364 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12365 MONO_ADD_INS (init_localsbb, ins);
12366 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12367 MONO_ADD_INS (cfg->bb_exit, ins);
12371 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12372 * the code they refer to was dead (#11880).
12374 if (sym_seq_points) {
12375 for (i = 0; i < header->code_size; ++i) {
12376 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12379 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12380 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12387 if (cfg->method == method) {
12388 MonoBasicBlock *bb;
12389 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12390 bb->region = mono_find_block_region (cfg, bb->real_offset);
12392 mono_create_spvar_for_region (cfg, bb->region);
12393 if (cfg->verbose_level > 2)
12394 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12398 if (inline_costs < 0) {
12401 /* Method is too large */
12402 mname = mono_method_full_name (method, TRUE);
12403 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12404 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12408 if ((cfg->verbose_level > 2) && (cfg->method == method))
12409 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12414 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12418 set_exception_type_from_invalid_il (cfg, method, ip);
12422 g_slist_free (class_inits);
12423 mono_basic_block_free (original_bb);
12424 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12425 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12426 if (cfg->exception_type)
12429 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart,
 * so a store whose source value is known to be a constant can embed the
 * immediate directly instead of going through a register.
 *   Aborts (g_assert_not_reached) for any opcode with no immediate form.
 * NOTE(review): the switch scaffolding (return type, braces, `switch`
 * header) is elided in this view; only the case bodies are visible.
 */
12433 store_membase_reg_to_store_membase_imm (int opcode)
12436 case OP_STORE_MEMBASE_REG:
12437 return OP_STORE_MEMBASE_IMM;
12438 case OP_STOREI1_MEMBASE_REG:
12439 return OP_STOREI1_MEMBASE_IMM;
12440 case OP_STOREI2_MEMBASE_REG:
12441 return OP_STOREI2_MEMBASE_IMM;
12442 case OP_STOREI4_MEMBASE_REG:
12443 return OP_STOREI4_MEMBASE_IMM;
12444 case OP_STOREI8_MEMBASE_REG:
12445 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes. */
12447 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand form of a register-register opcode
 * (e.g. an integer add becomes OP_IADD_IMM), used by the JIT when one
 * source operand is a constant. Covers 32-bit and 64-bit ALU ops,
 * compares, register-source stores, and some arch-specific opcodes.
 * NOTE(review): the `case OP_*:` labels are elided in this view — each
 * `return` line below corresponds to a case label not shown here.
 */
12454 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops. */
12458 return OP_IADD_IMM;
12460 return OP_ISUB_IMM;
12462 return OP_IDIV_IMM;
12464 return OP_IDIV_UN_IMM;
12466 return OP_IREM_IMM;
12468 return OP_IREM_UN_IMM;
12470 return OP_IMUL_IMM;
12472 return OP_IAND_IMM;
12476 return OP_IXOR_IMM;
12478 return OP_ISHL_IMM;
12480 return OP_ISHR_IMM;
12482 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops. */
12485 return OP_LADD_IMM;
12487 return OP_LSUB_IMM;
12489 return OP_LAND_IMM;
12493 return OP_LXOR_IMM;
12495 return OP_LSHL_IMM;
12497 return OP_LSHR_IMM;
12499 return OP_LSHR_UN_IMM;
/* LREM only has an immediate form when registers are 64 bits wide. */
12500 #if SIZEOF_REGISTER == 8
12502 return OP_LREM_IMM;
/* Compare opcodes. */
12506 return OP_COMPARE_IMM;
12508 return OP_ICOMPARE_IMM;
12510 return OP_LCOMPARE_IMM;
/* Register-source stores with an immediate counterpart. */
12512 case OP_STORE_MEMBASE_REG:
12513 return OP_STORE_MEMBASE_IMM;
12514 case OP_STOREI1_MEMBASE_REG:
12515 return OP_STOREI1_MEMBASE_IMM;
12516 case OP_STOREI2_MEMBASE_REG:
12517 return OP_STOREI2_MEMBASE_IMM;
12518 case OP_STOREI4_MEMBASE_REG:
12519 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes with immediate forms. */
12521 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12523 return OP_X86_PUSH_IMM;
12524 case OP_X86_COMPARE_MEMBASE_REG:
12525 return OP_X86_COMPARE_MEMBASE_IMM;
12527 #if defined(TARGET_AMD64)
12528 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12529 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12531 case OP_VOIDCALL_REG:
12532 return OP_VOIDCALL;
12540 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL indirect-load opcode (CEE_LDIND_*) into the
 * corresponding JIT load-from-base+offset opcode (OP_LOAD*_MEMBASE).
 * Both the native-int and object-reference loads map to the generic
 * pointer-sized OP_LOAD_MEMBASE. Aborts for anything else.
 * NOTE(review): most `case CEE_LDIND_*:` labels are elided in this
 * view — each `return` corresponds to one such case.
 */
12547 ldind_to_load_membase (int opcode)
12551 return OP_LOADI1_MEMBASE;
12553 return OP_LOADU1_MEMBASE;
12555 return OP_LOADI2_MEMBASE;
12557 return OP_LOADU2_MEMBASE;
12559 return OP_LOADI4_MEMBASE;
12561 return OP_LOADU4_MEMBASE;
12563 return OP_LOAD_MEMBASE;
12564 case CEE_LDIND_REF:
12565 return OP_LOAD_MEMBASE;
12567 return OP_LOADI8_MEMBASE;
12569 return OP_LOADR4_MEMBASE;
12571 return OP_LOADR8_MEMBASE;
12573 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL indirect-store opcode (CEE_STIND_*) into the
 * corresponding JIT store-to-base+offset opcode (OP_STORE*_MEMBASE_REG).
 * Object-reference stores use the generic pointer-sized
 * OP_STORE_MEMBASE_REG. Aborts for anything else.
 * NOTE(review): several `case CEE_STIND_*:` labels are elided in this
 * view — each `return` corresponds to one such case.
 */
12580 stind_to_store_membase (int opcode)
12584 return OP_STOREI1_MEMBASE_REG;
12586 return OP_STOREI2_MEMBASE_REG;
12588 return OP_STOREI4_MEMBASE_REG;
12590 case CEE_STIND_REF:
12591 return OP_STORE_MEMBASE_REG;
12593 return OP_STOREI8_MEMBASE_REG;
12595 return OP_STORER4_MEMBASE_REG;
12597 return OP_STORER8_MEMBASE_REG;
12599 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-base+offset opcode to the equivalent load from an
 * absolute address (OP_*_MEM), for when the base address is a constant.
 * Only implemented for x86/amd64 (see the FIXME about adding a proper
 * arch capability macro); 8-byte loads are 64-bit-register only.
 */
12606 mono_load_membase_to_load_mem (int opcode)
12608 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12609 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12611 case OP_LOAD_MEMBASE:
12612 return OP_LOAD_MEM;
12613 case OP_LOADU1_MEMBASE:
12614 return OP_LOADU1_MEM;
12615 case OP_LOADU2_MEMBASE:
12616 return OP_LOADU2_MEM;
12617 case OP_LOADI4_MEMBASE:
12618 return OP_LOADI4_MEM;
12619 case OP_LOADU4_MEMBASE:
12620 return OP_LOADU4_MEM;
12621 #if SIZEOF_REGISTER == 8
12622 case OP_LOADI8_MEMBASE:
12623 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds a store, return the fused
 * read-modify-write opcode that operates directly on the memory
 * destination (e.g. add-to-memory on x86). STORE_OPCODE is the store
 * being folded; the fold is only legal when the store writes a full
 * native word (and, on amd64, also an 8-byte word).
 * NOTE(review): the `case OP_*:` labels for the ALU opcodes are elided
 * in this view — each `return` corresponds to one such case.
 */
12632 op_to_op_dest_membase (int store_opcode, int opcode)
12634 #if defined(TARGET_X86)
/* Only word-sized stores can be fused on x86. */
12635 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source ALU ops fused into a memory destination. */
12640 return OP_X86_ADD_MEMBASE_REG;
12642 return OP_X86_SUB_MEMBASE_REG;
12644 return OP_X86_AND_MEMBASE_REG;
12646 return OP_X86_OR_MEMBASE_REG;
12648 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source ALU ops fused into a memory destination. */
12651 return OP_X86_ADD_MEMBASE_IMM;
12654 return OP_X86_SUB_MEMBASE_IMM;
12657 return OP_X86_AND_MEMBASE_IMM;
12660 return OP_X86_OR_MEMBASE_IMM;
12663 return OP_X86_XOR_MEMBASE_IMM;
12669 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores to be fused. */
12670 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit register-source ops. */
12675 return OP_X86_ADD_MEMBASE_REG;
12677 return OP_X86_SUB_MEMBASE_REG;
12679 return OP_X86_AND_MEMBASE_REG;
12681 return OP_X86_OR_MEMBASE_REG;
12683 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-source ops. */
12685 return OP_X86_ADD_MEMBASE_IMM;
12687 return OP_X86_SUB_MEMBASE_IMM;
12689 return OP_X86_AND_MEMBASE_IMM;
12691 return OP_X86_OR_MEMBASE_IMM;
12693 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit register-source ops. */
12695 return OP_AMD64_ADD_MEMBASE_REG;
12697 return OP_AMD64_SUB_MEMBASE_REG;
12699 return OP_AMD64_AND_MEMBASE_REG;
12701 return OP_AMD64_OR_MEMBASE_REG;
12703 return OP_AMD64_XOR_MEMBASE_REG;
/* 64-bit immediate-source ops. */
12706 return OP_AMD64_ADD_MEMBASE_IMM;
12709 return OP_AMD64_SUB_MEMBASE_IMM;
12712 return OP_AMD64_AND_MEMBASE_IMM;
12715 return OP_AMD64_OR_MEMBASE_IMM;
12718 return OP_AMD64_XOR_MEMBASE_IMM;
12728 op_to_op_store_membase (int store_opcode, int opcode)
12730 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12733 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12734 return OP_X86_SETEQ_MEMBASE;
12736 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12737 return OP_X86_SETNE_MEMBASE;
12745 op_to_op_src1_membase (int load_opcode, int opcode)
12748 /* FIXME: This has sign extension issues */
12750 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12751 return OP_X86_COMPARE_MEMBASE8_IMM;
12754 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12759 return OP_X86_PUSH_MEMBASE;
12760 case OP_COMPARE_IMM:
12761 case OP_ICOMPARE_IMM:
12762 return OP_X86_COMPARE_MEMBASE_IMM;
12765 return OP_X86_COMPARE_MEMBASE_REG;
12769 #ifdef TARGET_AMD64
12770 /* FIXME: This has sign extension issues */
12772 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12773 return OP_X86_COMPARE_MEMBASE8_IMM;
12778 #ifdef __mono_ilp32__
12779 if (load_opcode == OP_LOADI8_MEMBASE)
12781 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12783 return OP_X86_PUSH_MEMBASE;
12785 /* FIXME: This only works for 32 bit immediates
12786 case OP_COMPARE_IMM:
12787 case OP_LCOMPARE_IMM:
12788 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12789 return OP_AMD64_COMPARE_MEMBASE_IMM;
12791 case OP_ICOMPARE_IMM:
12792 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12793 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12797 #ifdef __mono_ilp32__
12798 if (load_opcode == OP_LOAD_MEMBASE)
12799 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12800 if (load_opcode == OP_LOADI8_MEMBASE)
12802 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12804 return OP_AMD64_COMPARE_MEMBASE_REG;
12807 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12808 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12817 op_to_op_src2_membase (int load_opcode, int opcode)
12820 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12826 return OP_X86_COMPARE_REG_MEMBASE;
12828 return OP_X86_ADD_REG_MEMBASE;
12830 return OP_X86_SUB_REG_MEMBASE;
12832 return OP_X86_AND_REG_MEMBASE;
12834 return OP_X86_OR_REG_MEMBASE;
12836 return OP_X86_XOR_REG_MEMBASE;
12840 #ifdef TARGET_AMD64
12841 #ifdef __mono_ilp32__
12842 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12844 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12848 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12850 return OP_X86_ADD_REG_MEMBASE;
12852 return OP_X86_SUB_REG_MEMBASE;
12854 return OP_X86_AND_REG_MEMBASE;
12856 return OP_X86_OR_REG_MEMBASE;
12858 return OP_X86_XOR_REG_MEMBASE;
12860 #ifdef __mono_ilp32__
12861 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12863 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12868 return OP_AMD64_COMPARE_REG_MEMBASE;
12870 return OP_AMD64_ADD_REG_MEMBASE;
12872 return OP_AMD64_SUB_REG_MEMBASE;
12874 return OP_AMD64_AND_REG_MEMBASE;
12876 return OP_AMD64_OR_REG_MEMBASE;
12878 return OP_AMD64_XOR_REG_MEMBASE;
12887 mono_op_to_op_imm_noemul (int opcode)
12890 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12896 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12903 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12908 return mono_op_to_op_imm (opcode);
12913 * mono_handle_global_vregs:
12915 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12919 mono_handle_global_vregs (MonoCompile *cfg)
12921 gint32 *vreg_to_bb;
12922 MonoBasicBlock *bb;
12925 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12927 #ifdef MONO_ARCH_SIMD_INTRINSICS
12928 if (cfg->uses_simd_intrinsics)
12929 mono_simd_simplify_indirection (cfg);
12932 /* Find local vregs used in more than one bb */
12933 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12934 MonoInst *ins = bb->code;
12935 int block_num = bb->block_num;
12937 if (cfg->verbose_level > 2)
12938 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12941 for (; ins; ins = ins->next) {
12942 const char *spec = INS_INFO (ins->opcode);
12943 int regtype = 0, regindex;
12946 if (G_UNLIKELY (cfg->verbose_level > 2))
12947 mono_print_ins (ins);
12949 g_assert (ins->opcode >= MONO_CEE_LAST);
12951 for (regindex = 0; regindex < 4; regindex ++) {
12954 if (regindex == 0) {
12955 regtype = spec [MONO_INST_DEST];
12956 if (regtype == ' ')
12959 } else if (regindex == 1) {
12960 regtype = spec [MONO_INST_SRC1];
12961 if (regtype == ' ')
12964 } else if (regindex == 2) {
12965 regtype = spec [MONO_INST_SRC2];
12966 if (regtype == ' ')
12969 } else if (regindex == 3) {
12970 regtype = spec [MONO_INST_SRC3];
12971 if (regtype == ' ')
12976 #if SIZEOF_REGISTER == 4
12977 /* In the LLVM case, the long opcodes are not decomposed */
12978 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12980 * Since some instructions reference the original long vreg,
12981 * and some reference the two component vregs, it is quite hard
12982 * to determine when it needs to be global. So be conservative.
12984 if (!get_vreg_to_inst (cfg, vreg)) {
12985 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12987 if (cfg->verbose_level > 2)
12988 printf ("LONG VREG R%d made global.\n", vreg);
12992 * Make the component vregs volatile since the optimizations can
12993 * get confused otherwise.
12995 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12996 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13000 g_assert (vreg != -1);
13002 prev_bb = vreg_to_bb [vreg];
13003 if (prev_bb == 0) {
13004 /* 0 is a valid block num */
13005 vreg_to_bb [vreg] = block_num + 1;
13006 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13007 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13010 if (!get_vreg_to_inst (cfg, vreg)) {
13011 if (G_UNLIKELY (cfg->verbose_level > 2))
13012 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13016 if (vreg_is_ref (cfg, vreg))
13017 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13019 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13022 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13025 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13028 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13031 g_assert_not_reached ();
13035 /* Flag as having been used in more than one bb */
13036 vreg_to_bb [vreg] = -1;
13042 /* If a variable is used in only one bblock, convert it into a local vreg */
13043 for (i = 0; i < cfg->num_varinfo; i++) {
13044 MonoInst *var = cfg->varinfo [i];
13045 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13047 switch (var->type) {
13053 #if SIZEOF_REGISTER == 8
13056 #if !defined(TARGET_X86)
13057 /* Enabling this screws up the fp stack on x86 */
13060 if (mono_arch_is_soft_float ())
13063 /* Arguments are implicitly global */
13064 /* Putting R4 vars into registers doesn't work currently */
13065 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13066 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13068 * Make that the variable's liveness interval doesn't contain a call, since
13069 * that would cause the lvreg to be spilled, making the whole optimization
13072 /* This is too slow for JIT compilation */
13074 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13076 int def_index, call_index, ins_index;
13077 gboolean spilled = FALSE;
13082 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13083 const char *spec = INS_INFO (ins->opcode);
13085 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13086 def_index = ins_index;
13088 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13089 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13090 if (call_index > def_index) {
13096 if (MONO_IS_CALL (ins))
13097 call_index = ins_index;
13107 if (G_UNLIKELY (cfg->verbose_level > 2))
13108 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13109 var->flags |= MONO_INST_IS_DEAD;
13110 cfg->vreg_to_inst [var->dreg] = NULL;
13117 * Compress the varinfo and vars tables so the liveness computation is faster and
13118 * takes up less space.
13121 for (i = 0; i < cfg->num_varinfo; ++i) {
13122 MonoInst *var = cfg->varinfo [i];
13123 if (pos < i && cfg->locals_start == i)
13124 cfg->locals_start = pos;
13125 if (!(var->flags & MONO_INST_IS_DEAD)) {
13127 cfg->varinfo [pos] = cfg->varinfo [i];
13128 cfg->varinfo [pos]->inst_c0 = pos;
13129 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13130 cfg->vars [pos].idx = pos;
13131 #if SIZEOF_REGISTER == 4
13132 if (cfg->varinfo [pos]->type == STACK_I8) {
13133 /* Modify the two component vars too */
13136 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13137 var1->inst_c0 = pos;
13138 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13139 var1->inst_c0 = pos;
13146 cfg->num_varinfo = pos;
13147 if (cfg->locals_start > cfg->num_varinfo)
13148 cfg->locals_start = cfg->num_varinfo;
13152 * mono_spill_global_vars:
13154 * Generate spill code for variables which are not allocated to registers,
13155 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13156 * code is generated which could be optimized by the local optimization passes.
13159 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13161 MonoBasicBlock *bb;
13163 int orig_next_vreg;
13164 guint32 *vreg_to_lvreg;
13166 guint32 i, lvregs_len;
13167 gboolean dest_has_lvreg = FALSE;
13168 guint32 stacktypes [128];
13169 MonoInst **live_range_start, **live_range_end;
13170 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13171 int *gsharedvt_vreg_to_idx = NULL;
13173 *need_local_opts = FALSE;
13175 memset (spec2, 0, sizeof (spec2));
13177 /* FIXME: Move this function to mini.c */
13178 stacktypes ['i'] = STACK_PTR;
13179 stacktypes ['l'] = STACK_I8;
13180 stacktypes ['f'] = STACK_R8;
13181 #ifdef MONO_ARCH_SIMD_INTRINSICS
13182 stacktypes ['x'] = STACK_VTYPE;
13185 #if SIZEOF_REGISTER == 4
13186 /* Create MonoInsts for longs */
13187 for (i = 0; i < cfg->num_varinfo; i++) {
13188 MonoInst *ins = cfg->varinfo [i];
13190 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13191 switch (ins->type) {
13196 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13199 g_assert (ins->opcode == OP_REGOFFSET);
13201 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13203 tree->opcode = OP_REGOFFSET;
13204 tree->inst_basereg = ins->inst_basereg;
13205 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13207 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13209 tree->opcode = OP_REGOFFSET;
13210 tree->inst_basereg = ins->inst_basereg;
13211 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13221 if (cfg->compute_gc_maps) {
13222 /* registers need liveness info even for !non refs */
13223 for (i = 0; i < cfg->num_varinfo; i++) {
13224 MonoInst *ins = cfg->varinfo [i];
13226 if (ins->opcode == OP_REGVAR)
13227 ins->flags |= MONO_INST_GC_TRACK;
13231 if (cfg->gsharedvt) {
13232 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13234 for (i = 0; i < cfg->num_varinfo; ++i) {
13235 MonoInst *ins = cfg->varinfo [i];
13238 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13239 if (i >= cfg->locals_start) {
13241 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13242 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13243 ins->opcode = OP_GSHAREDVT_LOCAL;
13244 ins->inst_imm = idx;
13247 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13248 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13254 /* FIXME: widening and truncation */
13257 * As an optimization, when a variable allocated to the stack is first loaded into
13258 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13259 * the variable again.
13261 orig_next_vreg = cfg->next_vreg;
13262 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13263 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13267 * These arrays contain the first and last instructions accessing a given
13269 * Since we emit bblocks in the same order we process them here, and we
13270 * don't split live ranges, these will precisely describe the live range of
13271 * the variable, i.e. the instruction range where a valid value can be found
13272 * in the variables location.
13273 * The live range is computed using the liveness info computed by the liveness pass.
13274 * We can't use vmv->range, since that is an abstract live range, and we need
13275 * one which is instruction precise.
13276 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13278 /* FIXME: Only do this if debugging info is requested */
13279 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13280 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13281 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13282 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13284 /* Add spill loads/stores */
13285 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13288 if (cfg->verbose_level > 2)
13289 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13291 /* Clear vreg_to_lvreg array */
13292 for (i = 0; i < lvregs_len; i++)
13293 vreg_to_lvreg [lvregs [i]] = 0;
13297 MONO_BB_FOR_EACH_INS (bb, ins) {
13298 const char *spec = INS_INFO (ins->opcode);
13299 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13300 gboolean store, no_lvreg;
13301 int sregs [MONO_MAX_SRC_REGS];
13303 if (G_UNLIKELY (cfg->verbose_level > 2))
13304 mono_print_ins (ins);
13306 if (ins->opcode == OP_NOP)
13310 * We handle LDADDR here as well, since it can only be decomposed
13311 * when variable addresses are known.
13313 if (ins->opcode == OP_LDADDR) {
13314 MonoInst *var = ins->inst_p0;
13316 if (var->opcode == OP_VTARG_ADDR) {
13317 /* Happens on SPARC/S390 where vtypes are passed by reference */
13318 MonoInst *vtaddr = var->inst_left;
13319 if (vtaddr->opcode == OP_REGVAR) {
13320 ins->opcode = OP_MOVE;
13321 ins->sreg1 = vtaddr->dreg;
13323 else if (var->inst_left->opcode == OP_REGOFFSET) {
13324 ins->opcode = OP_LOAD_MEMBASE;
13325 ins->inst_basereg = vtaddr->inst_basereg;
13326 ins->inst_offset = vtaddr->inst_offset;
13329 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13330 /* gsharedvt arg passed by ref */
13331 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13333 ins->opcode = OP_LOAD_MEMBASE;
13334 ins->inst_basereg = var->inst_basereg;
13335 ins->inst_offset = var->inst_offset;
13336 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13337 MonoInst *load, *load2, *load3;
13338 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13339 int reg1, reg2, reg3;
13340 MonoInst *info_var = cfg->gsharedvt_info_var;
13341 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13345 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13348 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13350 g_assert (info_var);
13351 g_assert (locals_var);
13353 /* Mark the instruction used to compute the locals var as used */
13354 cfg->gsharedvt_locals_var_ins = NULL;
13356 /* Load the offset */
13357 if (info_var->opcode == OP_REGOFFSET) {
13358 reg1 = alloc_ireg (cfg);
13359 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13360 } else if (info_var->opcode == OP_REGVAR) {
13362 reg1 = info_var->dreg;
13364 g_assert_not_reached ();
13366 reg2 = alloc_ireg (cfg);
13367 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13368 /* Load the locals area address */
13369 reg3 = alloc_ireg (cfg);
13370 if (locals_var->opcode == OP_REGOFFSET) {
13371 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13372 } else if (locals_var->opcode == OP_REGVAR) {
13373 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13375 g_assert_not_reached ();
13377 /* Compute the address */
13378 ins->opcode = OP_PADD;
13382 mono_bblock_insert_before_ins (bb, ins, load3);
13383 mono_bblock_insert_before_ins (bb, load3, load2);
13385 mono_bblock_insert_before_ins (bb, load2, load);
13387 g_assert (var->opcode == OP_REGOFFSET);
13389 ins->opcode = OP_ADD_IMM;
13390 ins->sreg1 = var->inst_basereg;
13391 ins->inst_imm = var->inst_offset;
13394 *need_local_opts = TRUE;
13395 spec = INS_INFO (ins->opcode);
13398 if (ins->opcode < MONO_CEE_LAST) {
13399 mono_print_ins (ins);
13400 g_assert_not_reached ();
13404 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13408 if (MONO_IS_STORE_MEMBASE (ins)) {
13409 tmp_reg = ins->dreg;
13410 ins->dreg = ins->sreg2;
13411 ins->sreg2 = tmp_reg;
13414 spec2 [MONO_INST_DEST] = ' ';
13415 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13416 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13417 spec2 [MONO_INST_SRC3] = ' ';
13419 } else if (MONO_IS_STORE_MEMINDEX (ins))
13420 g_assert_not_reached ();
13425 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13426 printf ("\t %.3s %d", spec, ins->dreg);
13427 num_sregs = mono_inst_get_src_registers (ins, sregs);
13428 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13429 printf (" %d", sregs [srcindex]);
13436 regtype = spec [MONO_INST_DEST];
13437 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13440 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13441 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13442 MonoInst *store_ins;
13444 MonoInst *def_ins = ins;
13445 int dreg = ins->dreg; /* The original vreg */
13447 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13449 if (var->opcode == OP_REGVAR) {
13450 ins->dreg = var->dreg;
13451 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13453 * Instead of emitting a load+store, use a _membase opcode.
13455 g_assert (var->opcode == OP_REGOFFSET);
13456 if (ins->opcode == OP_MOVE) {
13460 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13461 ins->inst_basereg = var->inst_basereg;
13462 ins->inst_offset = var->inst_offset;
13465 spec = INS_INFO (ins->opcode);
13469 g_assert (var->opcode == OP_REGOFFSET);
13471 prev_dreg = ins->dreg;
13473 /* Invalidate any previous lvreg for this vreg */
13474 vreg_to_lvreg [ins->dreg] = 0;
13478 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13480 store_opcode = OP_STOREI8_MEMBASE_REG;
13483 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13485 #if SIZEOF_REGISTER != 8
13486 if (regtype == 'l') {
13487 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13488 mono_bblock_insert_after_ins (bb, ins, store_ins);
13489 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13490 mono_bblock_insert_after_ins (bb, ins, store_ins);
13491 def_ins = store_ins;
13496 g_assert (store_opcode != OP_STOREV_MEMBASE);
13498 /* Try to fuse the store into the instruction itself */
13499 /* FIXME: Add more instructions */
13500 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13501 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13502 ins->inst_imm = ins->inst_c0;
13503 ins->inst_destbasereg = var->inst_basereg;
13504 ins->inst_offset = var->inst_offset;
13505 spec = INS_INFO (ins->opcode);
13506 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13507 ins->opcode = store_opcode;
13508 ins->inst_destbasereg = var->inst_basereg;
13509 ins->inst_offset = var->inst_offset;
13513 tmp_reg = ins->dreg;
13514 ins->dreg = ins->sreg2;
13515 ins->sreg2 = tmp_reg;
13518 spec2 [MONO_INST_DEST] = ' ';
13519 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13520 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13521 spec2 [MONO_INST_SRC3] = ' ';
13523 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13524 // FIXME: The backends expect the base reg to be in inst_basereg
13525 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13527 ins->inst_basereg = var->inst_basereg;
13528 ins->inst_offset = var->inst_offset;
13529 spec = INS_INFO (ins->opcode);
13531 /* printf ("INS: "); mono_print_ins (ins); */
13532 /* Create a store instruction */
13533 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13535 /* Insert it after the instruction */
13536 mono_bblock_insert_after_ins (bb, ins, store_ins);
13538 def_ins = store_ins;
13541 * We can't assign ins->dreg to var->dreg here, since the
13542 * sregs could use it. So set a flag, and do it after
13545 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13546 dest_has_lvreg = TRUE;
13551 if (def_ins && !live_range_start [dreg]) {
13552 live_range_start [dreg] = def_ins;
13553 live_range_start_bb [dreg] = bb;
13556 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13559 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13560 tmp->inst_c1 = dreg;
13561 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13568 num_sregs = mono_inst_get_src_registers (ins, sregs);
13569 for (srcindex = 0; srcindex < 3; ++srcindex) {
13570 regtype = spec [MONO_INST_SRC1 + srcindex];
13571 sreg = sregs [srcindex];
13573 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13574 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13575 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13576 MonoInst *use_ins = ins;
13577 MonoInst *load_ins;
13578 guint32 load_opcode;
13580 if (var->opcode == OP_REGVAR) {
13581 sregs [srcindex] = var->dreg;
13582 //mono_inst_set_src_registers (ins, sregs);
13583 live_range_end [sreg] = use_ins;
13584 live_range_end_bb [sreg] = bb;
13586 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13589 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13590 /* var->dreg is a hreg */
13591 tmp->inst_c1 = sreg;
13592 mono_bblock_insert_after_ins (bb, ins, tmp);
13598 g_assert (var->opcode == OP_REGOFFSET);
13600 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13602 g_assert (load_opcode != OP_LOADV_MEMBASE);
13604 if (vreg_to_lvreg [sreg]) {
13605 g_assert (vreg_to_lvreg [sreg] != -1);
13607 /* The variable is already loaded to an lvreg */
13608 if (G_UNLIKELY (cfg->verbose_level > 2))
13609 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13610 sregs [srcindex] = vreg_to_lvreg [sreg];
13611 //mono_inst_set_src_registers (ins, sregs);
13615 /* Try to fuse the load into the instruction */
13616 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13617 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13618 sregs [0] = var->inst_basereg;
13619 //mono_inst_set_src_registers (ins, sregs);
13620 ins->inst_offset = var->inst_offset;
13621 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13622 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13623 sregs [1] = var->inst_basereg;
13624 //mono_inst_set_src_registers (ins, sregs);
13625 ins->inst_offset = var->inst_offset;
13627 if (MONO_IS_REAL_MOVE (ins)) {
13628 ins->opcode = OP_NOP;
13631 //printf ("%d ", srcindex); mono_print_ins (ins);
13633 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13635 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13636 if (var->dreg == prev_dreg) {
13638 * sreg refers to the value loaded by the load
13639 * emitted below, but we need to use ins->dreg
13640 * since it refers to the store emitted earlier.
13644 g_assert (sreg != -1);
13645 vreg_to_lvreg [var->dreg] = sreg;
13646 g_assert (lvregs_len < 1024);
13647 lvregs [lvregs_len ++] = var->dreg;
13651 sregs [srcindex] = sreg;
13652 //mono_inst_set_src_registers (ins, sregs);
13654 #if SIZEOF_REGISTER != 8
13655 if (regtype == 'l') {
13656 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13657 mono_bblock_insert_before_ins (bb, ins, load_ins);
13658 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13659 mono_bblock_insert_before_ins (bb, ins, load_ins);
13660 use_ins = load_ins;
13665 #if SIZEOF_REGISTER == 4
13666 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13668 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13669 mono_bblock_insert_before_ins (bb, ins, load_ins);
13670 use_ins = load_ins;
13674 if (var->dreg < orig_next_vreg) {
13675 live_range_end [var->dreg] = use_ins;
13676 live_range_end_bb [var->dreg] = bb;
13679 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13682 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13683 tmp->inst_c1 = var->dreg;
13684 mono_bblock_insert_after_ins (bb, ins, tmp);
13688 mono_inst_set_src_registers (ins, sregs);
13690 if (dest_has_lvreg) {
13691 g_assert (ins->dreg != -1);
13692 vreg_to_lvreg [prev_dreg] = ins->dreg;
13693 g_assert (lvregs_len < 1024);
13694 lvregs [lvregs_len ++] = prev_dreg;
13695 dest_has_lvreg = FALSE;
13699 tmp_reg = ins->dreg;
13700 ins->dreg = ins->sreg2;
13701 ins->sreg2 = tmp_reg;
13704 if (MONO_IS_CALL (ins)) {
13705 /* Clear vreg_to_lvreg array */
13706 for (i = 0; i < lvregs_len; i++)
13707 vreg_to_lvreg [lvregs [i]] = 0;
13709 } else if (ins->opcode == OP_NOP) {
13711 MONO_INST_NULLIFY_SREGS (ins);
13714 if (cfg->verbose_level > 2)
13715 mono_print_ins_index (1, ins);
13718 /* Extend the live range based on the liveness info */
13719 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13720 for (i = 0; i < cfg->num_varinfo; i ++) {
13721 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13723 if (vreg_is_volatile (cfg, vi->vreg))
13724 /* The liveness info is incomplete */
13727 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13728 /* Live from at least the first ins of this bb */
13729 live_range_start [vi->vreg] = bb->code;
13730 live_range_start_bb [vi->vreg] = bb;
13733 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13734 /* Live at least until the last ins of this bb */
13735 live_range_end [vi->vreg] = bb->last_ins;
13736 live_range_end_bb [vi->vreg] = bb;
13742 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13744 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13745 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13747 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13748 for (i = 0; i < cfg->num_varinfo; ++i) {
13749 int vreg = MONO_VARINFO (cfg, i)->vreg;
13752 if (live_range_start [vreg]) {
13753 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13755 ins->inst_c1 = vreg;
13756 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13758 if (live_range_end [vreg]) {
13759 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13761 ins->inst_c1 = vreg;
13762 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13763 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13765 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13771 if (cfg->gsharedvt_locals_var_ins) {
13772 /* Nullify if unused */
13773 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13774 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13777 g_free (live_range_start);
13778 g_free (live_range_end);
13779 g_free (live_range_start_bb);
13780 g_free (live_range_end_bb);
13785 * - use 'iadd' instead of 'int_add'
13786 * - handling ovf opcodes: decompose in method_to_ir.
13787 * - unify iregs/fregs
13788 * -> partly done, the missing parts are:
13789 * - a more complete unification would involve unifying the hregs as well, so
13790 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13791 * would no longer map to the machine hregs, so the code generators would need to
13792 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13793 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13794 * fp/non-fp branches speeds it up by about 15%.
13795 * - use sext/zext opcodes instead of shifts
13797 * - get rid of TEMPLOADs if possible and use vregs instead
13798 * - clean up usage of OP_P/OP_ opcodes
13799 * - cleanup usage of DUMMY_USE
13800 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13802 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13803 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13804 * - make sure handle_stack_args () is called before the branch is emitted
13805 * - when the new IR is done, get rid of all unused stuff
13806 * - COMPARE/BEQ as separate instructions or unify them ?
13807 * - keeping them separate allows specialized compare instructions like
13808 * compare_imm, compare_membase
13809 * - most back ends unify fp compare+branch, fp compare+ceq
13810 * - integrate mono_save_args into inline_method
13811 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13812 * - handle long shift opts on 32 bit platforms somehow: they require
13813 * 3 sregs (2 for arg1 and 1 for arg2)
13814 * - make byref a 'normal' type.
13815 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13816 * variable if needed.
13817 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13818 * like inline_method.
13819 * - remove inlining restrictions
13820 * - fix LNEG and enable cfold of INEG
13821 * - generalize x86 optimizations like ldelema as a peephole optimization
13822 * - add store_mem_imm for amd64
13823 * - optimize the loading of the interruption flag in the managed->native wrappers
13824 * - avoid special handling of OP_NOP in passes
13825 * - move code inserting instructions into one function/macro.
13826 * - try a coalescing phase after liveness analysis
13827 * - add float -> vreg conversion + local optimizations on !x86
13828 * - figure out how to handle decomposed branches during optimizations, ie.
13829 * compare+branch, op_jump_table+op_br etc.
13830 * - promote RuntimeXHandles to vregs
13831 * - vtype cleanups:
13832 * - add a NEW_VARLOADA_VREG macro
13833 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13834 * accessing vtype fields.
13835 * - get rid of I8CONST on 64 bit platforms
13836 * - dealing with the increase in code size due to branches created during opcode
13838 * - use extended basic blocks
13839 * - all parts of the JIT
13840 * - handle_global_vregs () && local regalloc
13841 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13842 * - sources of increase in code size:
13845 * - isinst and castclass
13846 * - lvregs not allocated to global registers even if used multiple times
13847 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13849 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13850 * - add all micro optimizations from the old JIT
13851 * - put tree optimizations into the deadce pass
13852 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13853 * specific function.
13854 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13855 * fcompare + branchCC.
13856 * - create a helper function for allocating a stack slot, taking into account
13857 * MONO_CFG_HAS_SPILLUP.
13859 * - merge the ia64 switch changes.
13860 * - optimize mono_regstate2_alloc_int/float.
13861 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13862 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13863 * parts of the tree could be separated by other instructions, killing the tree
13864 * arguments, or stores killing loads etc. Also, should we fold loads into other
13865 * instructions if the result of the load is used multiple times ?
13866 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13867 * - LAST MERGE: 108395.
13868 * - when returning vtypes in registers, generate IR and append it to the end of the
13869 * last bb instead of doing it in the epilog.
13870 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13878 - When to decompose opcodes:
13879 - earlier: this makes some optimizations hard to implement, since the low level IR
13880 no longer contains the necessary information. But it is easier to do.
13881 - later: harder to implement, enables more optimizations.
13882 - Branches inside bblocks:
13883 - created when decomposing complex opcodes.
13884 - branches to another bblock: harmless, but not tracked by the branch
13885 optimizations, so need to branch to a label at the start of the bblock.
13886 - branches to inside the same bblock: very problematic, trips up the local
13887 reg allocator. Can be fixed by splitting the current bblock, but that is a
13888 complex operation, since some local vregs can become global vregs etc.
13889 - Local/global vregs:
13890 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13891 local register allocator.
13892 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13893 structure, created by mono_create_var (). Assigned to hregs or the stack by
13894 the global register allocator.
13895 - When to do optimizations like alu->alu_imm:
13896 - earlier -> saves work later on since the IR will be smaller/simpler
13897 - later -> can work on more instructions
13898 - Handling of valuetypes:
13899 - When a vtype is pushed on the stack, a new temporary is created, an
13900 instruction computing its address (LDADDR) is emitted and pushed on
13901 the stack. Need to optimize cases when the vtype is used immediately as in
13902 argument passing, stloc etc.
13903 - Instead of the to_end stuff in the old JIT, simply call the function handling
13904 the values on the stack before emitting the last instruction of the bb.
13907 #endif /* DISABLE_JIT */