2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
29 #ifdef HAVE_SYS_TIME_H
37 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/mono-debug.h>
54 #include <mono/metadata/mono-debug-debugger.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/metadata/debug-mono-symfile.h>
63 #include <mono/utils/mono-compiler.h>
64 #include <mono/utils/mono-memory-model.h>
65 #include <mono/utils/mono-error-internals.h>
66 #include <mono/metadata/mono-basic-block.h>
67 #include <mono/metadata/reflection-internals.h>
68 #include <mono/utils/mono-threads-coop.h>
74 #include "jit-icalls.h"
76 #include "debugger-agent.h"
77 #include "seq-points.h"
78 #include "aot-compiler.h"
79 #include "mini-llvm.h"
/* Heuristic cost assigned to a branch when deciding between branchy and branchless IR. */
81 #define BRANCH_COST 10
/* Maximum IL size (in the inliner's units) of a method considered for inlining. */
82 #define INLINE_LENGTH_LIMIT 20
84 /* These have 'cfg' as an implicit argument */
/*
 * NOTE(review): the closing "} while (0)" lines of the macros below appear to
 * have been lost in extraction (original line numbers are non-contiguous).
 */
/* Abort inlining of the current method: record MSG and bail to exception_exit. */
85 #define INLINE_FAILURE(msg) do { \
86 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
87 inline_failure (cfg, msg); \
88 goto exception_exit; \
/* Bail out if a compilation exception has already been recorded on cfg. */
91 #define CHECK_CFG_EXCEPTION do {\
92 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
93 goto exception_exit; \
/* Record a FieldAccessException for METHOD/FIELD and abort compilation. */
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
/* When compiling with generic sharing, mark OPCODE as unshareable and abort. */
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
/* Same as above, but for gsharedvt (valuetype-generic sharing) compilation. */
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
/* Record an OutOfMemory condition in cfg->error and abort compilation. */
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
/* Disable AOT compilation for this method; trace the site when verbose. */
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
/* Record a TypeLoadException and abort; breaks into the debugger if configured. */
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
/* Like LOAD_ERROR but also stashes the offending KLASS in cfg->exception_ptr. */
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
/* Bail to mono_error_exit if cfg->error holds a pending MonoError. */
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
/* Cached icall signatures, created once in mono_create_helper_signatures (). */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
159 static MonoMethodSignature *helper_sig_jit_thread_attach;
161 /* type loading helpers */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
166 * Instruction metadata
/*
 * Per-opcode metadata tables, generated by expanding mini-ops.h twice with
 * different MINI_OP/MINI_OP3 definitions.  First expansion: dest/src register
 * kinds (one char each).
 */
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
/* Initialize a MonoMethodVar: bid 0xffff marks "no first use recorded yet". */
206 #define MONO_INIT_VARINFO(vi,id) do { \
207 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Thin public wrappers around the cfg-local virtual-register allocators.
 * ireg = integer, lreg = long, freg = float, preg = pointer-sized,
 * dreg = destination register chosen by eval-stack type.
 */
213 mono_alloc_ireg (MonoCompile *cfg)
215 return alloc_ireg (cfg);
219 mono_alloc_lreg (MonoCompile *cfg)
221 return alloc_lreg (cfg);
225 mono_alloc_freg (MonoCompile *cfg)
227 return alloc_freg (cfg);
231 mono_alloc_preg (MonoCompile *cfg)
233 return alloc_preg (cfg);
237 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
239 return alloc_dreg (cfg, stack_type);
243 * mono_alloc_ireg_ref:
245 * Allocate an IREG, and mark it as holding a GC ref.
248 mono_alloc_ireg_ref (MonoCompile *cfg)
250 return alloc_ireg_ref (cfg);
254 * mono_alloc_ireg_mp:
256 * Allocate an IREG, and mark it as holding a managed pointer.
259 mono_alloc_ireg_mp (MonoCompile *cfg)
261 return alloc_ireg_mp (cfg);
265 * mono_alloc_ireg_copy:
267 * Allocate an IREG with the same GC type as VREG.
270 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC classification (ref / managed pointer / plain) of VREG. */
272 if (vreg_is_ref (cfg, vreg))
273 return alloc_ireg_ref (cfg);
274 else if (vreg_is_mp (cfg, vreg))
275 return alloc_ireg_mp (cfg);
277 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the register-to-register move opcode used when copying a
 * value of that type (OP_MOVE / OP_LMOVE / OP_FMOVE / OP_RMOVE / ...).
 * Enums and generic instances are unwrapped to their underlying type first.
 */
281 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
286 type = mini_get_underlying_type (type);
288 switch (type->type) {
301 case MONO_TYPE_FNPTR:
303 case MONO_TYPE_CLASS:
304 case MONO_TYPE_STRING:
305 case MONO_TYPE_OBJECT:
306 case MONO_TYPE_SZARRAY:
307 case MONO_TYPE_ARRAY:
311 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values kept in dedicated single-precision registers. */
317 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
320 case MONO_TYPE_VALUETYPE:
321 if (type->data.klass->enumtype) {
322 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get their own move opcode (handled elsewhere in the switch). */
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 case MONO_TYPE_TYPEDBYREF:
330 case MONO_TYPE_GENERICINST:
331 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
333 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing. */
337 g_assert (cfg->gshared);
338 if (mini_type_var_is_vt (type))
341 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
343 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the in/out edges of BB (block number and dfn),
 * then every instruction in the block.
 */
349 mono_print_bb (MonoBasicBlock *bb, const char *msg)
354 printf ("\n%s %d: [IN: ", msg, bb->block_num);
355 for (i = 0; i < bb->in_count; ++i)
356 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
358 for (i = 0; i < bb->out_count; ++i)
359 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
/* One-time creation of the cached icall signatures declared above. */
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
/* Debugging hook: trap into the debugger when unverified IL is seen and the
 * 'break_on_unverified' debug option is set.  MONO_NEVER_INLINE keeps a
 * distinct stack frame to set breakpoints on. */
374 static MONO_NEVER_INLINE void
375 break_on_unverified (void)
377 if (mini_get_debug_options ()->break_on_unverified)
381 static MONO_NEVER_INLINE void
382 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
384 char *method_fname = mono_method_full_name (method, TRUE);
385 char *field_fname = mono_field_full_name (field);
386 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
387 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
388 g_free (method_fname);
389 g_free (field_fname);
/* Mark the current compilation as a failed inline attempt; MSG explains why
 * (printed only when verbose). */
392 static MONO_NEVER_INLINE void
393 inline_failure (MonoCompile *cfg, const char *msg)
395 if (cfg->verbose_level >= 2)
396 printf ("inline failed: %s\n", msg);
397 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
400 static MONO_NEVER_INLINE void
401 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
403 if (cfg->verbose_level > 2) \
404 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record that gsharedvt compilation failed at OPCODE; builds a descriptive
 * exception message (ownership passes to cfg->exception_message) and sets
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED so an unshared instantiation is used. */
408 static MONO_NEVER_INLINE void
409 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
412 if (cfg->verbose_level >= 2)
413 printf ("%s\n", cfg->exception_message);
414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
418 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
419 * foo<T> (int i) { ldarg.0; box T; }
/* Unverifiable IL: under gsharedvt fall back to per-instantiation compilation;
 * otherwise break into the debugger (if configured) and reject the method. */
421 #define UNVERIFIED do { \
422 if (cfg->gsharedvt) { \
423 if (cfg->verbose_level > 2) \
424 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
426 goto exception_exit; \
428 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * address IP; rejects IPs outside the method body as unverified. */
432 #define GET_BBLOCK(cfg,tblock,ip) do { \
433 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
435 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
436 NEW_BBLOCK (cfg, (tblock)); \
437 (tblock)->cil_code = (ip); \
438 ADD_BBLOCK (cfg, (tblock)); \
442 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm.  The result register is
 * allocated as a managed pointer since LEA is used for address arithmetic. */
443 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
444 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
445 (dest)->dreg = alloc_ireg_mp ((cfg)); \
446 (dest)->sreg1 = (sr1); \
447 (dest)->sreg2 = (sr2); \
448 (dest)->inst_imm = (imm); \
449 (dest)->backend.shift_amount = (shift); \
450 MONO_ADD_INS ((cfg)->cbb, (dest)); \
454 /* Emit conversions so both operands of a binary opcode are of the same type */
456 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
458 MonoInst *arg1 = *arg1_ref;
459 MonoInst *arg2 = *arg2_ref;
/* Case 1: mixed R4/R8 operands — widen the R4 side to R8. */
462 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
463 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
466 /* Mixing r4/r8 is allowed by the spec */
467 if (arg1->type == STACK_R4) {
468 int dreg = alloc_freg (cfg);
470 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
471 conv->type = STACK_R8;
475 if (arg2->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
479 conv->type = STACK_R8;
485 #if SIZEOF_REGISTER == 8
486 /* FIXME: Need to add many more cases */
/* Case 2 (64-bit only): pointer + I4 — sign-extend the I4 operand to
 * pointer width and patch ins to read the widened register. */
487 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
490 int dr = alloc_preg (cfg);
491 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
492 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack entries, emit the type-specialized binary op, push the
 * (possibly decomposed) result.  Uses the implicit locals cfg/ins/sp. */
497 #define ADD_BINOP(op) do { \
498 MONO_INST_NEW (cfg, ins, (op)); \
500 ins->sreg1 = sp [0]->dreg; \
501 ins->sreg2 = sp [1]->dreg; \
502 type_from_op (cfg, ins, sp [0], sp [1]); \
504 /* Have to insert a widening op */ \
505 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
506 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
507 MONO_ADD_INS ((cfg)->cbb, (ins)); \
508 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for unary opcodes (one stack operand). */
511 #define ADD_UNOP(op) do { \
512 MONO_INST_NEW (cfg, ins, (op)); \
514 ins->sreg1 = sp [0]->dreg; \
515 type_from_op (cfg, ins, sp [0], NULL); \
517 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
518 MONO_ADD_INS ((cfg)->cbb, (ins)); \
519 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair: wires the true edge to the IL
 * branch target and the false edge to NEXT_BLOCK (or the fall-through block),
 * flushing the eval stack across the basic-block boundary. */
522 #define ADD_BINCOND(next_block) do { \
525 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
526 cmp->sreg1 = sp [0]->dreg; \
527 cmp->sreg2 = sp [1]->dreg; \
528 type_from_op (cfg, cmp, sp [0], sp [1]); \
530 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
531 type_from_op (cfg, ins, sp [0], sp [1]); \
532 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
533 GET_BBLOCK (cfg, tblock, target); \
534 link_bblock (cfg, cfg->cbb, tblock); \
535 ins->inst_true_bb = tblock; \
536 if ((next_block)) { \
537 link_bblock (cfg, cfg->cbb, (next_block)); \
538 ins->inst_false_bb = (next_block); \
539 start_new_bblock = 1; \
541 GET_BBLOCK (cfg, tblock, ip); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_false_bb = tblock; \
544 start_new_bblock = 2; \
546 if (sp != stack_start) { \
547 handle_stack_args (cfg, stack_start, sp - stack_start); \
548 CHECK_UNVERIFIABLE (cfg); \
550 MONO_ADD_INS (cfg->cbb, cmp); \
551 MONO_ADD_INS (cfg->cbb, ins); \
555 * link_bblock: Links two basic blocks
557 * links two basic blocks in the control flow graph, the 'from'
558 * argument is the starting block and the 'to' argument is the block
559 * the control flow ends to after 'from'.
562 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
564 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added. */
568 if (from->cil_code) {
570 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
572 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
575 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
577 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists, otherwise grow from->out_bb by one. */
582 for (i = 0; i < from->out_count; ++i) {
583 if (to == from->out_bb [i]) {
589 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
590 for (i = 0; i < from->out_count; ++i) {
591 newa [i] = from->out_bb [i];
/* Mirror the same de-duplicated append on to->in_bb. */
599 for (i = 0; i < to->in_count; ++i) {
600 if (from == to->in_bb [i]) {
606 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
607 for (i = 0; i < to->in_count; ++i) {
608 newa [i] = to->in_bb [i];
/* Public wrapper for link_bblock (). */
617 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
619 link_bblock (cfg, from, to);
623 * mono_find_block_region:
625 * We mark each basic block with a region ID. We use that to avoid BB
626 * optimizations when blocks are in different regions.
629 * A region token that encodes where this region is, and information
630 * about the clause owner for this block.
632 * The region encodes the try/catch/filter clause that owns this block
633 * as well as the type. -1 is a special value that represents a block
634 * that is in none of try/catch/filter.
637 mono_find_block_region (MonoCompile *cfg, int offset)
639 MonoMethodHeader *header = cfg->header;
640 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter or handler block?  Handlers take
 * precedence over the protected (try) ranges checked in the second pass. */
643 for (i = 0; i < header->num_clauses; ++i) {
644 clause = &header->clauses [i];
645 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
646 (offset < (clause->handler_offset)))
647 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
649 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
650 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
651 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
652 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
653 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
655 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: is OFFSET inside a protected (try) range? */
658 for (i = 0; i < header->num_clauses; ++i) {
659 clause = &header->clauses [i];
661 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
662 return ((i + 1) << 8) | clause->flags;
/* Return whether IL OFFSET lies inside a finally or fault handler of the
 * current method. */
669 ip_in_finally_clause (MonoCompile *cfg, int offset)
671 MonoMethodHeader *header = cfg->header;
672 MonoExceptionClause *clause;
675 for (i = 0; i < header->num_clauses; ++i) {
676 clause = &header->clauses [i];
/* Only finally/fault clauses are of interest; skip catch/filter. */
677 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
680 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose protected range contains IP but not
 * TARGET — i.e. the handlers a branch/leave from IP to TARGET must run
 * through.  Returns a GList of MonoExceptionClause* (list owned by caller). */
687 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
689 MonoMethodHeader *header = cfg->header;
690 MonoExceptionClause *clause;
694 for (i = 0; i < header->num_clauses; ++i) {
695 clause = &header->clauses [i];
696 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
697 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
698 if (clause->flags == type)
699 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer spill variable for REGION, cached in
 * cfg->spvars keyed by region token. */
706 mono_create_spvar_for_region (MonoCompile *cfg, int region)
710 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
714 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
715 /* prevent it from being register allocated */
716 var->flags |= MONO_INST_VOLATILE;
718 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at IL OFFSET. */
722 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
724 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for the handler at IL
 * OFFSET, cached in cfg->exvars. */
728 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
732 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
736 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
737 /* prevent it from being register allocated */
738 var->flags |= MONO_INST_VOLATILE;
740 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
746 * Returns the type used in the eval stack when @type is loaded.
747 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/OBJ/MP/R4/R8/VTYPE/INV) and inst->klass
 * from TYPE; byrefs become STACK_MP, enums/generic insts are unwrapped. */
750 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
754 type = mini_get_underlying_type (type);
755 inst->klass = klass = mono_class_from_mono_type (type);
757 inst->type = STACK_MP;
762 switch (type->type) {
764 inst->type = STACK_INV;
772 inst->type = STACK_I4;
777 case MONO_TYPE_FNPTR:
778 inst->type = STACK_PTR;
780 case MONO_TYPE_CLASS:
781 case MONO_TYPE_STRING:
782 case MONO_TYPE_OBJECT:
783 case MONO_TYPE_SZARRAY:
784 case MONO_TYPE_ARRAY:
785 inst->type = STACK_OBJ;
789 inst->type = STACK_I8;
792 inst->type = cfg->r4_stack_type;
795 inst->type = STACK_R8;
797 case MONO_TYPE_VALUETYPE:
798 if (type->data.klass->enumtype) {
799 type = mono_class_enum_basetype (type->data.klass);
803 inst->type = STACK_VTYPE;
806 case MONO_TYPE_TYPEDBYREF:
807 inst->klass = mono_defaults.typed_reference_class;
808 inst->type = STACK_VTYPE;
810 case MONO_TYPE_GENERICINST:
811 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables require generic sharing; gsharedvt vtypes stay STACK_VTYPE. */
815 g_assert (cfg->gshared);
816 if (mini_is_gsharedvt_type (type)) {
817 g_assert (cfg->gsharedvt);
818 inst->type = STACK_VTYPE;
820 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
824 g_error ("unknown type 0x%02x in eval stack type", type->type);
829 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops, indexed [src1->type][src2->type].
 * Row/column order follows the STACK_* enum; STACK_INV marks invalid pairs. */
832 bin_num_table [STACK_MAX] [STACK_MAX] = {
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
838 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* neg_table: result stack type of CEE_NEG/CEE_NOT, indexed by operand type. */
846 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
849 /* reduce the size of this table */
/* Result stack type of integer-only binops (and/or/xor/...). */
851 bin_int_table [STACK_MAX] [STACK_MAX] = {
852 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid pair, non-zero values encode which
 * comparison kinds are allowed for the operand-type pair. */
863 bin_comp_table [STACK_MAX] [STACK_MAX] = {
864 /* Inv i L p F & O vt r4 */
866 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
867 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
868 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
869 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
870 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
871 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
872 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
873 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
876 /* reduce the size of this table */
/* Result stack type of shift ops: type follows the shifted operand. */
878 shift_table [STACK_MAX] [STACK_MAX] = {
879 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
890 * Tables to map from the non-specific opcode to the matching
891 * type-specific opcode.
893 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
895 binops_op_map [STACK_MAX] = {
896 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
899 /* handles from CEE_NEG to CEE_CONV_U8 */
901 unops_op_map [STACK_MAX] = {
902 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
905 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
907 ovfops_op_map [STACK_MAX] = {
908 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
911 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
913 ovf2ops_op_map [STACK_MAX] = {
914 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
917 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
919 ovf3ops_op_map [STACK_MAX] = {
920 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
923 /* handles from CEE_BEQ to CEE_BLT_UN */
925 beqops_op_map [STACK_MAX] = {
926 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
929 /* handles from CEE_CEQ to CEE_CLT_UN */
931 ceqops_op_map [STACK_MAX] = {
932 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
936 * Sets ins->type (the type on the eval stack) according to the
937 * type of the opcode and the arguments to it.
938 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
940 * FIXME: this function sets ins->type unconditionally in some cases, but
941 * it should set it to invalid for some types (a conv.x on an object)
/* Also rewrites ins->opcode from the generic CEE_*/OP_* form to the
 * type-specialized opcode via the *_op_map tables above. */
944 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
946 switch (ins->opcode) {
953 /* FIXME: check unverifiable args for STACK_MP */
954 ins->type = bin_num_table [src1->type] [src2->type];
955 ins->opcode += binops_op_map [ins->type];
962 ins->type = bin_int_table [src1->type] [src2->type];
963 ins->opcode += binops_op_map [ins->type];
968 ins->type = shift_table [src1->type] [src2->type];
969 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I variant from the first operand's width/kind. */
974 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE;
977 else if (src1->type == STACK_R4)
978 ins->opcode = OP_RCOMPARE;
979 else if (src1->type == STACK_R8)
980 ins->opcode = OP_FCOMPARE;
982 ins->opcode = OP_ICOMPARE;
984 case OP_ICOMPARE_IMM:
985 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
986 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
987 ins->opcode = OP_LCOMPARE_IMM;
999 ins->opcode += beqops_op_map [src1->type];
1002 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1003 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only pairs whose table entry has bit 0 set are valid. */
1009 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1010 ins->opcode += ceqops_op_map [src1->type];
1014 ins->type = neg_table [src1->type];
1015 ins->opcode += unops_op_map [ins->type];
1018 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1019 ins->type = src1->type;
1021 ins->type = STACK_INV;
1022 ins->opcode += unops_op_map [ins->type];
1028 ins->type = STACK_I4;
1029 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1032 ins->type = STACK_R8;
1033 switch (src1->type) {
1036 ins->opcode = OP_ICONV_TO_R_UN;
1039 ins->opcode = OP_LCONV_TO_R_UN;
1043 case CEE_CONV_OVF_I1:
1044 case CEE_CONV_OVF_U1:
1045 case CEE_CONV_OVF_I2:
1046 case CEE_CONV_OVF_U2:
1047 case CEE_CONV_OVF_I4:
1048 case CEE_CONV_OVF_U4:
1049 ins->type = STACK_I4;
1050 ins->opcode += ovf3ops_op_map [src1->type];
1052 case CEE_CONV_OVF_I_UN:
1053 case CEE_CONV_OVF_U_UN:
1054 ins->type = STACK_PTR;
1055 ins->opcode += ovf2ops_op_map [src1->type];
1057 case CEE_CONV_OVF_I1_UN:
1058 case CEE_CONV_OVF_I2_UN:
1059 case CEE_CONV_OVF_I4_UN:
1060 case CEE_CONV_OVF_U1_UN:
1061 case CEE_CONV_OVF_U2_UN:
1062 case CEE_CONV_OVF_U4_UN:
1063 ins->type = STACK_I4;
1064 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width. */
1067 ins->type = STACK_PTR;
1068 switch (src1->type) {
1070 ins->opcode = OP_ICONV_TO_U;
1074 #if SIZEOF_VOID_P == 8
1075 ins->opcode = OP_LCONV_TO_U;
1077 ins->opcode = OP_MOVE;
1081 ins->opcode = OP_LCONV_TO_U;
1084 ins->opcode = OP_FCONV_TO_U;
1090 ins->type = STACK_I8;
1091 ins->opcode += unops_op_map [src1->type];
1093 case CEE_CONV_OVF_I8:
1094 case CEE_CONV_OVF_U8:
1095 ins->type = STACK_I8;
1096 ins->opcode += ovf3ops_op_map [src1->type];
1098 case CEE_CONV_OVF_U8_UN:
1099 case CEE_CONV_OVF_I8_UN:
1100 ins->type = STACK_I8;
1101 ins->opcode += ovf2ops_op_map [src1->type];
1104 ins->type = cfg->r4_stack_type;
1105 ins->opcode += unops_op_map [src1->type];
1108 ins->type = STACK_R8;
1109 ins->opcode += unops_op_map [src1->type];
1112 ins->type = STACK_R8;
1116 ins->type = STACK_I4;
1117 ins->opcode += ovfops_op_map [src1->type];
1120 case CEE_CONV_OVF_I:
1121 case CEE_CONV_OVF_U:
1122 ins->type = STACK_PTR;
1123 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floating point. */
1126 case CEE_ADD_OVF_UN:
1128 case CEE_MUL_OVF_UN:
1130 case CEE_SUB_OVF_UN:
1131 ins->type = bin_num_table [src1->type] [src2->type];
1132 ins->opcode += ovfops_op_map [src1->type];
1133 if (ins->type == STACK_R8)
1134 ins->type = STACK_INV;
1136 case OP_LOAD_MEMBASE:
1137 ins->type = STACK_PTR;
1139 case OP_LOADI1_MEMBASE:
1140 case OP_LOADU1_MEMBASE:
1141 case OP_LOADI2_MEMBASE:
1142 case OP_LOADU2_MEMBASE:
1143 case OP_LOADI4_MEMBASE:
1144 case OP_LOADU4_MEMBASE:
1145 ins->type = STACK_PTR;
1147 case OP_LOADI8_MEMBASE:
1148 ins->type = STACK_I8;
1150 case OP_LOADR4_MEMBASE:
1151 ins->type = cfg->r4_stack_type;
1153 case OP_LOADR8_MEMBASE:
1154 ins->type = STACK_R8;
1157 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as their klass by default. */
1161 if (ins->type == STACK_MP)
1162 ins->klass = mono_defaults.object_class;
/* Stack type produced by each ldind.* variant, in opcode order. */
1167 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Parameter-compatibility table; see the commented-out check in
 * check_values_to_signature () below. */
1173 param_table [STACK_MAX] [STACK_MAX] = {
/* Verify that the eval-stack values in ARGS are compatible with SIG
 * (byref-ness, reference kinds, floating-point parameters). */
1178 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1183 switch (args->type) {
1193 for (i = 0; i < sig->param_count; ++i) {
1194 switch (args [i].type) {
/* Managed pointers on the stack require a byref parameter, and vice versa. */
1198 if (!sig->params [i]->byref)
1202 if (sig->params [i]->byref)
1204 switch (sig->params [i]->type) {
1205 case MONO_TYPE_CLASS:
1206 case MONO_TYPE_STRING:
1207 case MONO_TYPE_OBJECT:
1208 case MONO_TYPE_SZARRAY:
1209 case MONO_TYPE_ARRAY:
/* Floating-point stack values must match an R4/R8 parameter. */
1216 if (sig->params [i]->byref)
1218 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1227 /*if (!param_table [args [i].type] [sig->params [i]->type])
1235 * When we need a pointer to the current domain many times in a method, we
1236 * call mono_domain_get() once and we store the result in a local variable.
1237 * This function returns the variable that represents the MonoDomain*.
1239 inline static MonoInst *
1240 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created, then cached on the cfg. */
1242 if (!cfg->domainvar)
1243 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1244 return cfg->domainvar;
1248 * The got_var contains the address of the Global Offset Table when AOT
1252 mono_get_got_var (MonoCompile *cfg)
/* Only meaningful for AOT on backends that need an explicit GOT register. */
1254 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1256 if (!cfg->got_var) {
1257 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 return cfg->got_var;
/* Return (lazily creating) the variable holding the runtime generic context /
 * vtable pointer.  Only valid under generic sharing. */
1263 mono_get_vtable_var (MonoCompile *cfg)
1265 g_assert (cfg->gshared);
1267 if (!cfg->rgctx_var) {
1268 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1269 /* force the var to be stack allocated */
1270 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1273 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType* (the inverse of
 * type_to_eval_stack_type, up to the precision the stack type retains). */
1277 type_from_stack_type (MonoInst *ins) {
1278 switch (ins->type) {
1279 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1280 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1281 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1282 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1283 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the klass recorded on the instruction. */
1285 return &ins->klass->this_arg;
1286 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1287 case STACK_VTYPE: return &ins->klass->byval_arg;
1289 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type: maps a MonoType to its evaluation-stack
 * type (STACK_* value). The underlying type is resolved first so enums map
 * like their base type; generic insts that are valuetypes are handled like
 * MONO_TYPE_VALUETYPE. R4 maps to cfg->r4_stack_type.
 */
1294 static G_GNUC_UNUSED int
1295 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1297 t = mono_type_get_underlying_type (t);
1309 case MONO_TYPE_FNPTR:
1311 case MONO_TYPE_CLASS:
1312 case MONO_TYPE_STRING:
1313 case MONO_TYPE_OBJECT:
1314 case MONO_TYPE_SZARRAY:
1315 case MONO_TYPE_ARRAY:
1321 return cfg->r4_stack_type;
1324 case MONO_TYPE_VALUETYPE:
1325 case MONO_TYPE_TYPEDBYREF:
1327 case MONO_TYPE_GENERICINST:
1328 if (mono_type_generic_inst_is_valuetype (t))
1334 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Maps an array-element access CIL opcode (CEE_LDELEM_* / CEE_STELEM_*)
 * to the MonoClass of the element it reads or writes. Unknown opcodes
 * hit g_assert_not_reached ().
 */
1341 array_access_to_klass (int opcode)
1345 return mono_defaults.byte_class;
1347 return mono_defaults.uint16_class;
1350 return mono_defaults.int_class;
1353 return mono_defaults.sbyte_class;
1356 return mono_defaults.int16_class;
1359 return mono_defaults.int32_class;
1361 return mono_defaults.uint32_class;
1364 return mono_defaults.int64_class;
1367 return mono_defaults.single_class;
1370 return mono_defaults.double_class;
1371 case CEE_LDELEM_REF:
1372 case CEE_STELEM_REF:
1373 return mono_defaults.object_class;
1375 g_assert_not_reached ();
1381 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Returns a local variable used to carry the value of stack slot SLOT
 * across basic-block boundaries. Variables are cached per
 * (slot, stack-type) pair in cfg->intvars so the same var is reused;
 * slots beyond header->max_stack (from inlining) always get a fresh var.
 */
1384 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1389 /* inlining can result in deeper stacks */
1390 if (slot >= cfg->header->max_stack)
1391 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 pos = ins->type - 1 + slot * STACK_MAX;
1395 switch (ins->type) {
1402 if ((vnum = cfg->intvars [pos]))
1403 return cfg->varinfo [vnum];
1404 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1405 cfg->intvars [pos] = res->inst_c0;
1408 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, records the (image, token) pair KEY was resolved
 * from into cfg->token_info_hash so the AOT compiler can re-resolve it.
 * Skipped when a generic context is set (see the comment inside).
 */
1414 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1417 * Don't use this if a generic_context is set, since that means AOT can't
1418 * look up the method using just the image+token.
1419 * table == 0 means this is a reference made from a wrapper.
1421 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1422 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1423 jump_info_token->image = image;
1424 jump_info_token->token = token;
1425 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1430 * This function is called to handle items that are left on the evaluation stack
1431 * at basic block boundaries. What happens is that we save the values to local variables
1432 * and we reload them later when first entering the target basic block (with the
1433 * handle_loaded_temps () function).
1434 * A single joint point will use the same variables (stored in the array bb->out_stack or
1435 * bb->in_stack, if the basic block is before or after the joint point).
1437 * This function needs to be called _before_ emitting the last instruction of
1438 * the bb (i.e. before emitting a branch).
1439 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * @sp: the COUNT items currently on the evaluation stack.
 * Spills SP into the temporaries shared with the successor blocks
 * (bb->out_stack / outb->in_stack) and replaces SP entries with those
 * locals, so the target blocks can reload them.
 */
1442 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1445 MonoBasicBlock *bb = cfg->cbb;
1446 MonoBasicBlock *outb;
1447 MonoInst *inst, **locals;
1452 if (cfg->verbose_level > 3)
1453 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: decide which variables form out_stack. */
1454 if (!bb->out_scount) {
1455 bb->out_scount = count;
1456 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor, when one already has it. */
1458 for (i = 0; i < bb->out_count; ++i) {
1459 outb = bb->out_bb [i];
1460 /* exception handlers are linked, but they should not be considered for stack args */
1461 if (outb->flags & BB_EXCEPTION_HANDLER)
1463 //printf (" %d", outb->block_num);
1464 if (outb->in_stack) {
1466 bb->out_stack = outb->in_stack;
1472 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1473 for (i = 0; i < count; ++i) {
1475 * try to reuse temps already allocated for this purpouse, if they occupy the same
1476 * stack slot and if they are of the same type.
1477 * This won't cause conflicts since if 'local' is used to
1478 * store one of the values in the in_stack of a bblock, then
1479 * the same variable will be used for the same outgoing stack
1481 * This doesn't work when inlining methods, since the bblocks
1482 * in the inlined methods do not inherit their in_stack from
1483 * the bblock they are inlined to. See bug #58863 for an
1486 if (cfg->inlined_method)
1487 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1489 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate out_stack to all successors that don't have an in_stack yet;
 * a successor with a different in_scount makes the method unverifiable. */
1494 for (i = 0; i < bb->out_count; ++i) {
1495 outb = bb->out_bb [i];
1496 /* exception handlers are linked, but they should not be considered for stack args */
1497 if (outb->flags & BB_EXCEPTION_HANDLER)
1499 if (outb->in_scount) {
1500 if (outb->in_scount != bb->out_scount) {
1501 cfg->unverifiable = TRUE;
1504 continue; /* check they are the same locals */
1506 outb->in_scount = count;
1507 outb->in_stack = bb->out_stack;
1510 locals = bb->out_stack;
/* Store each stack item into its temp and leave the temp on the stack. */
1512 for (i = 0; i < count; ++i) {
1513 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1514 inst->cil_code = sp [i]->cil_code;
1515 sp [i] = locals [i];
1516 if (cfg->verbose_level > 3)
1517 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1521 * It is possible that the out bblocks already have in_stack assigned, and
1522 * the in_stacks differ. In this case, we will store to all the different
1529 /* Find a bblock which has a different in_stack */
1531 while (bindex < bb->out_count) {
1532 outb = bb->out_bb [bindex];
1533 /* exception handlers are linked, but they should not be considered for stack args */
1534 if (outb->flags & BB_EXCEPTION_HANDLER) {
1538 if (outb->in_stack != locals) {
1539 for (i = 0; i < count; ++i) {
1540 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1541 inst->cil_code = sp [i]->cil_code;
1542 sp [i] = locals [i];
1543 if (cfg->verbose_level > 3)
1544 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1546 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Emits IR that loads the runtime constant described by
 * (patch_type, data): an AOT constant when compiling AOT, otherwise the
 * patch target is resolved right now and emitted as a pointer constant.
 */
1556 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1560 if (cfg->compile_aot) {
1561 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1567 ji.type = patch_type;
1568 ji.data.target = data;
/* JIT case: resolve the patch eagerly and load the concrete address. */
1569 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1570 mono_error_assert_ok (&error);
1572 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around the static emit_runtime_constant () above. */
1578 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1580 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 * Emits IR that sets SIZE bytes at [destreg + offset] to VAL (only
 * val == 0 is supported, see the assert). Small aligned sizes use
 * immediate stores; otherwise VAL is materialized in a register and
 * stored in the widest aligned chunks available, falling back to
 * byte stores for the remainder.
 */
1584 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1588 g_assert (val == 0);
/* Fast path: one immediate store when the size fits alignment+register. */
1593 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1596 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1599 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1602 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1604 #if SIZEOF_REGISTER == 8
1606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1612 val_reg = alloc_preg (cfg);
1614 if (SIZEOF_REGISTER == 8)
1615 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1617 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: degrade to byte-at-a-time stores. */
1620 /* This could be optimized further if neccesary */
1622 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1629 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1648 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1653 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emits IR that copies SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], using the widest aligned load/store pairs the
 * backend allows (8/4/2 bytes), with byte copies for unaligned data and
 * for the tail. Size is asserted < 10000 to bound code expansion.
 */
1660 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1667 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1668 g_assert (size < 10000);
/* Unaligned: copy a byte at a time. */
1671 /* This could be optimized further if neccesary */
1673 cur_reg = alloc_preg (cfg);
1674 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks when the backend tolerates unaligned 64-bit access. */
1682 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1684 cur_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1694 cur_reg = alloc_preg (cfg);
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1702 cur_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emits IR storing register SREG1 into the TLS slot identified by
 * TLS_KEY. AOT uses OP_TLS_SET_REG with the offset loaded as an AOT
 * constant; the JIT uses OP_TLS_SET with the offset resolved immediately.
 */
1720 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1724 if (cfg->compile_aot) {
1725 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1726 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1728 ins->sreg2 = c->dreg;
1729 MONO_ADD_INS (cfg->cbb, ins);
1731 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1733 ins->inst_offset = mini_get_tls_offset (tls_key);
1734 MONO_ADD_INS (cfg->cbb, ins);
1741 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Links cfg->lmf_var into the thread's LMF list: loads the lmf address
 * (via TLS intrinsic, jit-tls intrinsic, inlined pthread_getspecific, or
 * the mono_get_lmf_addr icall, depending on what is available), saves the
 * previous LMF into lmf->previous_lmf, and publishes the new LMF.
 */
1744 emit_push_lmf (MonoCompile *cfg)
1747 * Emit IR to push the LMF:
1748 * lmf_addr = <lmf_addr from tls>
1749 * lmf->lmf_addr = lmf_addr
1750 * lmf->prev_lmf = *lmf_addr
1753 int lmf_reg, prev_lmf_reg;
1754 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS. */
1759 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1760 /* Load current lmf */
1761 lmf_ins = mono_get_lmf_intrinsic (cfg);
1763 MONO_ADD_INS (cfg->cbb, lmf_ins);
1764 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1765 lmf_reg = ins->dreg;
1766 /* Save previous_lmf */
1767 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1769 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1772 * Store lmf_addr in a variable, so it can be allocated to a global register.
1774 if (!cfg->lmf_addr_var)
1775 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf, computed from the jit-tls intrinsic. */
1778 ins = mono_get_jit_tls_intrinsic (cfg);
1780 int jit_tls_dreg = ins->dreg;
1782 MONO_ADD_INS (cfg->cbb, ins);
1783 lmf_reg = alloc_preg (cfg);
1784 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1786 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1789 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1791 MONO_ADD_INS (cfg->cbb, lmf_ins);
1794 MonoInst *args [16], *jit_tls_ins, *ins;
1796 /* Inline mono_get_lmf_addr () */
1797 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
1799 /* Load mono_jit_tls_id */
1800 if (cfg->compile_aot)
1801 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
1803 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
1804 /* call pthread_getspecific () */
1805 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
1806 /* lmf_addr = &jit_tls->lmf */
1807 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Last resort: plain icall to mono_get_lmf_addr (). */
1810 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1814 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1816 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1817 lmf_reg = ins->dreg;
1819 prev_lmf_reg = alloc_preg (cfg);
1820 /* Save previous_lmf */
1821 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1822 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Publish: *lmf_addr = lmf. */
1824 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1831 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Unlinks cfg->lmf_var from the thread's LMF list, restoring
 * previous_lmf either directly into the TLS LMF slot (fast path) or
 * through the lmf_addr variable: *(lmf->lmf_addr) = lmf->previous_lmf.
 */
1834 emit_pop_lmf (MonoCompile *cfg)
1836 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1842 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1843 lmf_reg = ins->dreg;
/* Fast path: write previous_lmf straight back into TLS. */
1845 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1846 /* Load previous_lmf */
1847 prev_lmf_reg = alloc_preg (cfg);
1848 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1850 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1853 * Emit IR to pop the LMF:
1854 * *(lmf->lmf_addr) = lmf->prev_lmf
1856 /* This could be called before emit_push_lmf () */
1857 if (!cfg->lmf_addr_var)
1858 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1859 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1861 prev_lmf_reg = alloc_preg (cfg);
1862 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1863 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emits an enter/leave profiler icall (FUNC) passing the current method,
 * but only when profiling enter/leave is enabled and we are not inside
 * an inlined method (which would distort profiling results).
 */
1868 emit_instrumentation_call (MonoCompile *cfg, void *func)
1870 MonoInst *iargs [1];
1873 * Avoid instrumenting inlined methods since it can
1874 * distort profiling results.
1876 if (cfg->method != cfg->current_method)
1879 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1880 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1881 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Selects the call opcode for a call returning TYPE: one of the
 * VOID/int/long/float/R4/vtype call families, further specialized into
 * _REG (indirect via calli), _MEMBASE (virtual) or plain (direct) forms.
 * Enums recurse on their base type; generic insts on the container class.
 */
1886 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1889 type = mini_get_underlying_type (type);
1890 switch (type->type) {
1891 case MONO_TYPE_VOID:
1892 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1899 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1903 case MONO_TYPE_FNPTR:
1904 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1905 case MONO_TYPE_CLASS:
1906 case MONO_TYPE_STRING:
1907 case MONO_TYPE_OBJECT:
1908 case MONO_TYPE_SZARRAY:
1909 case MONO_TYPE_ARRAY:
1910 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1913 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1916 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1918 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1920 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1921 case MONO_TYPE_VALUETYPE:
1922 if (type->data.klass->enumtype) {
1923 type = mono_class_enum_basetype (type->data.klass);
1926 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1927 case MONO_TYPE_TYPEDBYREF:
1928 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1929 case MONO_TYPE_GENERICINST:
1930 type = &type->data.generic_class->container_class->byval_arg;
1933 case MONO_TYPE_MVAR:
1935 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1937 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1942 //XXX this ignores if t is byref
/* TRUE for primitive scalar element types: BOOLEAN..U8 plus native I/U. */
1943 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1946 * target_type_is_incompatible:
1947 * @cfg: MonoCompile context
1949 * Check that the item @arg on the evaluation stack can be stored
1950 * in the target type (can be a local, or field, etc).
1951 * The cfg arg can be used to check if we need verification or just
1954 * Returns: non-0 value if arg can't be stored on a target.
1957 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1959 MonoType *simple_type;
/* byref targets: only managed pointers / native pointers may be stored. */
1962 if (target->byref) {
1963 /* FIXME: check that the pointed to types match */
1964 if (arg->type == STACK_MP) {
1965 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1966 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1967 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1969 /* if the target is native int& or same type */
1970 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1973 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1974 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1975 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1979 if (arg->type == STACK_PTR)
/* Non-byref: compare the lowered target type against the stack type. */
1984 simple_type = mini_get_underlying_type (target);
1985 switch (simple_type->type) {
1986 case MONO_TYPE_VOID:
1994 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1998 /* STACK_MP is needed when setting pinned locals */
1999 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2004 case MONO_TYPE_FNPTR:
2006 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2007 * in native int. (#688008).
2009 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 if (arg->type != STACK_OBJ)
2019 /* FIXME: check type compatibility */
2023 if (arg->type != STACK_I8)
2027 if (arg->type != cfg->r4_stack_type)
2031 if (arg->type != STACK_R8)
2034 case MONO_TYPE_VALUETYPE:
2035 if (arg->type != STACK_VTYPE)
2037 klass = mono_class_from_mono_type (simple_type);
2038 if (klass != arg->klass)
2041 case MONO_TYPE_TYPEDBYREF:
2042 if (arg->type != STACK_VTYPE)
2044 klass = mono_class_from_mono_type (simple_type);
2045 if (klass != arg->klass)
2048 case MONO_TYPE_GENERICINST:
2049 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2050 MonoClass *target_class;
2051 if (arg->type != STACK_VTYPE)
2053 klass = mono_class_from_mono_type (simple_type);
2054 target_class = mono_class_from_mono_type (target);
2055 /* The second cases is needed when doing partial sharing */
2056 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2060 if (arg->type != STACK_OBJ)
2062 /* FIXME: check type compatibility */
2066 case MONO_TYPE_MVAR:
2067 g_assert (cfg->gshared);
2068 if (mini_type_var_is_vt (simple_type)) {
2069 if (arg->type != STACK_VTYPE)
2072 if (arg->type != STACK_OBJ)
2077 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2083 * Prepare arguments for passing to a function call.
2084 * Return a non-zero value if the arguments can't be passed to the given
2086 * The type checks are not yet complete and some conversions may need
2087 * casts on 32 or 64 bit architectures.
2089 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 * Verifies each stack item in ARGS (including 'this') against the
 * declared parameter types in SIG. Returns non-zero on mismatch.
 */
2092 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2094 MonoType *simple_type;
/* 'this' must be an object reference or a managed/native pointer. */
2098 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2102 for (i = 0; i < sig->param_count; ++i) {
2103 if (sig->params [i]->byref) {
2104 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2108 simple_type = mini_get_underlying_type (sig->params [i]);
2110 switch (simple_type->type) {
2111 case MONO_TYPE_VOID:
2120 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2126 case MONO_TYPE_FNPTR:
2127 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2130 case MONO_TYPE_CLASS:
2131 case MONO_TYPE_STRING:
2132 case MONO_TYPE_OBJECT:
2133 case MONO_TYPE_SZARRAY:
2134 case MONO_TYPE_ARRAY:
2135 if (args [i]->type != STACK_OBJ)
2140 if (args [i]->type != STACK_I8)
2144 if (args [i]->type != cfg->r4_stack_type)
2148 if (args [i]->type != STACK_R8)
2151 case MONO_TYPE_VALUETYPE:
2152 if (simple_type->data.klass->enumtype) {
2153 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2156 if (args [i]->type != STACK_VTYPE)
2159 case MONO_TYPE_TYPEDBYREF:
2160 if (args [i]->type != STACK_VTYPE)
2163 case MONO_TYPE_GENERICINST:
2164 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2167 case MONO_TYPE_MVAR:
2169 if (args [i]->type != STACK_VTYPE)
2173 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Maps an OP_*CALL_MEMBASE opcode to its direct-call counterpart
 * (the return values are on lines not visible in this excerpt).
 */
2181 callvirt_to_call (int opcode)
2184 case OP_CALL_MEMBASE:
2186 case OP_VOIDCALL_MEMBASE:
2188 case OP_FCALL_MEMBASE:
2190 case OP_RCALL_MEMBASE:
2192 case OP_VCALL_MEMBASE:
2194 case OP_LCALL_MEMBASE:
2197 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Maps an OP_*CALL_MEMBASE opcode to its indirect (register-based)
 * OP_*CALL_REG counterpart; unknown opcodes abort.
 */
2204 callvirt_to_call_reg (int opcode)
2207 case OP_CALL_MEMBASE:
2209 case OP_VOIDCALL_MEMBASE:
2210 return OP_VOIDCALL_REG;
2211 case OP_FCALL_MEMBASE:
2212 return OP_FCALL_REG;
2213 case OP_RCALL_MEMBASE:
2214 return OP_RCALL_REG;
2215 case OP_VCALL_MEMBASE:
2216 return OP_VCALL_REG;
2217 case OP_LCALL_MEMBASE:
2218 return OP_LCALL_REG;
2220 g_assert_not_reached ();
2226 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Makes the IMT/interface-method argument available to CALL: either
 * copies IMT_ARG into a fresh register or materializes METHOD as a
 * METHODCONST runtime constant, then routes the register to
 * MONO_ARCH_IMT_REG (LLVM records it in call->imt_arg_reg instead).
 */
2228 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2232 if (COMPILE_LLVM (cfg)) {
2234 method_reg = alloc_preg (cfg);
2235 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2237 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2238 method_reg = ins->dreg;
2242 call->imt_arg_reg = method_reg;
2244 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value, passed directly in the IMT register. */
2249 method_reg = alloc_preg (cfg);
2250 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2252 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2253 method_reg = ins->dreg;
2256 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocates a MonoJumpInfo from MP describing a patch of TYPE at IP
 * pointing at TARGET (field initialization partly outside this excerpt).
 */
2259 static MonoJumpInfo *
2260 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2262 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2266 ji->data.target = target;
/* Wrapper: reports how KLASS uses its generic context (gshared helper). */
2272 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2275 return mono_class_check_context_used (klass);
/* Wrapper: reports how METHOD uses its generic context (gshared helper). */
2281 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2284 return mono_method_check_context_used (method);
2290 * check_method_sharing:
2292 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are returned through the optional out parameters:
 * *out_pass_vtable — pass the class vtable (static or valuetype methods
 * on generic classes, when the method is sharable and has no method inst);
 * *out_pass_mrgctx — pass a method rgctx (generic methods with a method
 * inst that are sharable, or gsharedvt signatures).
 */
2295 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2297 gboolean pass_vtable = FALSE;
2298 gboolean pass_mrgctx = FALSE;
2300 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2301 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2302 gboolean sharable = FALSE;
2304 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2308 * Pass vtable iff target method might
2309 * be shared, which means that sharing
2310 * is enabled for its class and its
2311 * context is sharable (and it's not a
2314 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2318 if (mini_method_get_context (cmethod) &&
2319 mini_method_get_context (cmethod)->method_inst) {
2320 g_assert (!pass_vtable);
2322 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2325 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2330 if (out_pass_vtable)
2331 *out_pass_vtable = pass_vtable;
2332 if (out_pass_mrgctx)
2333 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Builds a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects indirect calls, VIRTUAL_ membase calls, TAIL tail calls,
 * RGCTX marks an rgctx argument, UNBOX_TRAMPOLINE requests an unbox
 * trampoline. Handles vtype returns (via vret_addr or OP_OUTARG_VTRETADDR),
 * soft-float r4 argument conversion, and hands the call to the
 * LLVM/arch-specific lowering.
 */
2336 inline static MonoCallInst *
2337 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2338 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2342 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls fire the leave-profiler event before transferring control. */
2350 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2352 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2354 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2357 call->signature = sig;
2358 call->rgctx_reg = rgctx;
2359 sig_ret = mini_get_underlying_type (sig->ret);
2361 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr ... */
2364 if (mini_type_is_vtype (sig_ret)) {
2365 call->vret_var = cfg->vret_addr;
2366 //g_assert_not_reached ();
2368 } else if (mini_type_is_vtype (sig_ret)) {
/* ... or allocate a temp and reference it via OP_OUTARG_VTRETADDR. */
2369 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2372 temp->backend.is_pinvoke = sig->pinvoke;
2375 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2376 * address of return value to increase optimization opportunities.
2377 * Before vtype decomposition, the dreg of the call ins itself represents the
2378 * fact the call modifies the return value. After decomposition, the call will
2379 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2380 * will be transformed into an LDADDR.
2382 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2383 loada->dreg = alloc_preg (cfg);
2384 loada->inst_p0 = temp;
2385 /* We reference the call too since call->dreg could change during optimization */
2386 loada->inst_p1 = call;
2387 MONO_ADD_INS (cfg->cbb, loada);
2389 call->inst.dreg = temp->dreg;
2391 call->vret_var = loada;
2392 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2393 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2395 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2396 if (COMPILE_SOFT_FLOAT (cfg)) {
2398 * If the call has a float argument, we would need to do an r8->r4 conversion using
2399 * an icall, but that cannot be done during the call sequence since it would clobber
2400 * the call registers + the stack. So we do it before emitting the call.
2402 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2404 MonoInst *in = call->args [i];
2406 if (i >= sig->hasthis)
2407 t = sig->params [i - sig->hasthis];
2409 t = &mono_defaults.int_class->byval_arg;
2410 t = mono_type_get_underlying_type (t);
2412 if (!t->byref && t->type == MONO_TYPE_R4) {
2413 MonoInst *iargs [1];
2417 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2419 /* The result will be in an int vreg */
2420 call->args [i] = conv;
2426 call->need_unbox_trampoline = unbox_trampoline;
2429 if (COMPILE_LLVM (cfg))
2430 mono_llvm_emit_call (cfg, call);
2432 mono_arch_emit_call (cfg, call);
2434 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call. */
2437 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2438 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Routes the rgctx value in RGCTX_REG to MONO_ARCH_RGCTX_REG for CALL
 * and flags the compile/call as using the rgctx register.
 */
2444 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2446 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2447 cfg->uses_rgctx_reg = TRUE;
2448 call->rgctx_reg = TRUE;
2450 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emits an indirect call through ADDR with signature SIG and args ARGS,
 * with optional IMT and rgctx arguments. For pinvoke wrappers with
 * calling-convention checking enabled, it also saves SP before the call
 * and throws ExecutionEngineException if SP changed afterwards.
 */
2454 inline static MonoInst*
2455 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2460 gboolean check_sp = FALSE;
2462 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2463 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2465 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value now; the call sequence may clobber its source. */
2470 rgctx_reg = mono_alloc_preg (cfg);
2471 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Remember the stack pointer so it can be validated after the call. */
2475 if (!cfg->stack_inbalance_var)
2476 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2478 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2479 ins->dreg = cfg->stack_inbalance_var->dreg;
2480 MONO_ADD_INS (cfg->cbb, ins);
2483 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2485 call->inst.sreg1 = addr->dreg;
2488 emit_imt_argument (cfg, call, NULL, imt_arg);
2490 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare post-call SP against the saved value; restore then throw. */
2495 sp_reg = mono_alloc_preg (cfg);
2497 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2499 MONO_ADD_INS (cfg->cbb, ins);
2501 /* Restore the stack so we don't crash when throwing the exception */
2502 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2503 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2504 MONO_ADD_INS (cfg->cbb, ins);
2506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2507 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2511 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2513 return (MonoInst*)call;
2517 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2520 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2523 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2524 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2526 #ifndef DISABLE_REMOTING
2527 gboolean might_be_remote = FALSE;
2529 gboolean virtual_ = this_ins != NULL;
2530 gboolean enable_for_aot = TRUE;
2533 MonoInst *call_target = NULL;
2535 gboolean need_unbox_trampoline;
2538 sig = mono_method_signature (method);
2540 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2541 g_assert_not_reached ();
2544 rgctx_reg = mono_alloc_preg (cfg);
2545 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2548 if (method->string_ctor) {
2549 /* Create the real signature */
2550 /* FIXME: Cache these */
2551 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2552 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2557 context_used = mini_method_check_context_used (cfg, method);
2559 #ifndef DISABLE_REMOTING
2560 might_be_remote = this_ins && sig->hasthis &&
2561 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2562 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2564 if (might_be_remote && context_used) {
2567 g_assert (cfg->gshared);
2569 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2571 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2575 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2576 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2578 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2580 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2582 #ifndef DISABLE_REMOTING
2583 if (might_be_remote)
2584 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2587 call->method = method;
2588 call->inst.flags |= MONO_INST_HAS_METHOD;
2589 call->inst.inst_left = this_ins;
2590 call->tail_call = tail;
2593 int vtable_reg, slot_reg, this_reg;
2596 this_reg = this_ins->dreg;
2598 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2599 MonoInst *dummy_use;
2601 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2603 /* Make a call to delegate->invoke_impl */
2604 call->inst.inst_basereg = this_reg;
2605 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2606 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2608 /* We must emit a dummy use here because the delegate trampoline will
2609 replace the 'this' argument with the delegate target making this activation
2610 no longer a root for the delegate.
2611 This is an issue for delegates that target collectible code such as dynamic
2612 methods of GC'able assemblies.
2614 For a test case look into #667921.
2616 FIXME: a dummy use is not the best way to do it as the local register allocator
2617 will put it on a caller save register and spil it around the call.
2618 Ideally, we would either put it on a callee save register or only do the store part.
2620 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2622 return (MonoInst*)call;
2625 if ((!cfg->compile_aot || enable_for_aot) &&
2626 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2627 (MONO_METHOD_IS_FINAL (method) &&
2628 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2629 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2631 * the method is not virtual, we just need to ensure this is not null
2632 * and then we can call the method directly.
2634 #ifndef DISABLE_REMOTING
2635 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2637 * The check above ensures method is not gshared, this is needed since
2638 * gshared methods can't have wrappers.
2640 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2644 if (!method->string_ctor)
2645 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2647 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2648 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2650 * the method is virtual, but we can statically dispatch since either
2651 * it's class or the method itself are sealed.
2652 * But first we need to ensure it's not a null reference.
2654 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2656 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2657 } else if (call_target) {
2658 vtable_reg = alloc_preg (cfg);
2659 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2661 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2662 call->inst.sreg1 = call_target->dreg;
2663 call->inst.flags &= !MONO_INST_HAS_METHOD;
2665 vtable_reg = alloc_preg (cfg);
2666 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2667 if (mono_class_is_interface (method->klass)) {
2668 guint32 imt_slot = mono_method_get_imt_slot (method);
2669 emit_imt_argument (cfg, call, call->method, imt_arg);
2670 slot_reg = vtable_reg;
2671 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2673 slot_reg = vtable_reg;
2674 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2675 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2677 g_assert (mono_method_signature (method)->generic_param_count);
2678 emit_imt_argument (cfg, call, call->method, imt_arg);
2682 call->inst.sreg1 = slot_reg;
2683 call->inst.inst_offset = offset;
2684 call->is_virtual = TRUE;
2688 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2691 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2693 return (MonoInst*)call;
2697 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2699 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2703 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2710 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2713 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2715 return (MonoInst*)call;
2719 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2721 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2725 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2729 * mono_emit_abs_call:
2731 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2733 inline static MonoInst*
2734 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2735 MonoMethodSignature *sig, MonoInst **args)
2737 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2741 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2744 if (cfg->abs_patches == NULL)
2745 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2746 g_hash_table_insert (cfg->abs_patches, ji, ji);
2747 ins = mono_emit_native_call (cfg, ji, sig, args);
2748 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2752 static MonoMethodSignature*
2753 sig_to_rgctx_sig (MonoMethodSignature *sig)
2755 // FIXME: memory allocation
2756 MonoMethodSignature *res;
2759 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2760 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2761 res->param_count = sig->param_count + 1;
2762 for (i = 0; i < sig->param_count; ++i)
2763 res->params [i] = sig->params [i];
2764 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2768 /* Make an indirect call to FSIG passing an additional argument */
2770 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2772 MonoMethodSignature *csig;
2773 MonoInst *args_buf [16];
2775 int i, pindex, tmp_reg;
2777 /* Make a call with an rgctx/extra arg */
2778 if (fsig->param_count + 2 < 16)
2781 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2784 args [pindex ++] = orig_args [0];
2785 for (i = 0; i < fsig->param_count; ++i)
2786 args [pindex ++] = orig_args [fsig->hasthis + i];
2787 tmp_reg = alloc_preg (cfg);
2788 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2789 csig = sig_to_rgctx_sig (fsig);
2790 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2793 /* Emit an indirect call to the function descriptor ADDR */
2795 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2797 int addr_reg, arg_reg;
2798 MonoInst *call_target;
2800 g_assert (cfg->llvm_only);
2803 * addr points to a <addr, arg> pair, load both of them, and
2804 * make a call to addr, passing arg as an extra arg.
2806 addr_reg = alloc_preg (cfg);
2807 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2808 arg_reg = alloc_preg (cfg);
2809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2811 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
2815 direct_icalls_enabled (MonoCompile *cfg)
2817 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2819 if (cfg->compile_llvm && !cfg->llvm_only)
2822 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2828 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2831 * Call the jit icall without a wrapper if possible.
2832 * The wrapper is needed for the following reasons:
2833 * - to handle exceptions thrown using mono_raise_exceptions () from the
2834 * icall function. The EH code needs the lmf frame pushed by the
2835 * wrapper to be able to unwind back to managed code.
2836 * - to be able to do stack walks for asynchronously suspended
2837 * threads when debugging.
2839 if (info->no_raise && direct_icalls_enabled (cfg)) {
2843 if (!info->wrapper_method) {
2844 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2845 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2847 mono_memory_barrier ();
2851 * Inline the wrapper method, which is basically a call to the C icall, and
2852 * an exception check.
2854 costs = inline_method (cfg, info->wrapper_method, NULL,
2855 args, NULL, il_offset, TRUE);
2856 g_assert (costs > 0);
2857 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2861 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2866 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2868 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2869 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2873 * Native code might return non register sized integers
2874 * without initializing the upper bits.
2876 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2877 case OP_LOADI1_MEMBASE:
2878 widen_op = OP_ICONV_TO_I1;
2880 case OP_LOADU1_MEMBASE:
2881 widen_op = OP_ICONV_TO_U1;
2883 case OP_LOADI2_MEMBASE:
2884 widen_op = OP_ICONV_TO_I2;
2886 case OP_LOADU2_MEMBASE:
2887 widen_op = OP_ICONV_TO_U2;
2893 if (widen_op != -1) {
2894 int dreg = alloc_preg (cfg);
2897 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2898 widen->type = ins->type;
2909 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
2911 MonoInst *args [16];
2913 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
2914 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
2916 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2920 get_memcpy_method (void)
2922 static MonoMethod *memcpy_method = NULL;
2923 if (!memcpy_method) {
2924 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2926 g_error ("Old corlib found. Install a new one");
2928 return memcpy_method;
2932 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2934 MonoClassField *field;
2935 gpointer iter = NULL;
2937 while ((field = mono_class_get_fields (klass, &iter))) {
2940 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2942 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2943 if (mini_type_is_reference (mono_field_get_type (field))) {
2944 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2945 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2947 MonoClass *field_class = mono_class_from_mono_type (field->type);
2948 if (field_class->has_references)
2949 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
2955 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2957 int card_table_shift_bits;
2958 gpointer card_table_mask;
2960 MonoInst *dummy_use;
2961 int nursery_shift_bits;
2962 size_t nursery_size;
2964 if (!cfg->gen_write_barriers)
2967 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2969 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2971 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2974 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2975 wbarrier->sreg1 = ptr->dreg;
2976 wbarrier->sreg2 = value->dreg;
2977 MONO_ADD_INS (cfg->cbb, wbarrier);
2978 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
2979 int offset_reg = alloc_preg (cfg);
2983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2984 if (card_table_mask)
2985 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2987 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2988 * IMM's larger than 32bits.
2990 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2991 card_reg = ins->dreg;
2993 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2994 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2996 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2997 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3000 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
3004 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3006 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3007 unsigned need_wb = 0;
3012 /*types with references can't have alignment smaller than sizeof(void*) */
3013 if (align < SIZEOF_VOID_P)
3016 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3017 if (size > 32 * SIZEOF_VOID_P)
3020 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3022 /* We don't unroll more than 5 stores to avoid code bloat. */
3023 if (size > 5 * SIZEOF_VOID_P) {
3024 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3025 size += (SIZEOF_VOID_P - 1);
3026 size &= ~(SIZEOF_VOID_P - 1);
3028 EMIT_NEW_ICONST (cfg, iargs [2], size);
3029 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3030 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3034 destreg = iargs [0]->dreg;
3035 srcreg = iargs [1]->dreg;
3038 dest_ptr_reg = alloc_preg (cfg);
3039 tmp_reg = alloc_preg (cfg);
3042 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3044 while (size >= SIZEOF_VOID_P) {
3045 MonoInst *load_inst;
3046 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3047 load_inst->dreg = tmp_reg;
3048 load_inst->inst_basereg = srcreg;
3049 load_inst->inst_offset = offset;
3050 MONO_ADD_INS (cfg->cbb, load_inst);
3052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3055 emit_write_barrier (cfg, iargs [0], load_inst);
3057 offset += SIZEOF_VOID_P;
3058 size -= SIZEOF_VOID_P;
3061 /*tmp += sizeof (void*)*/
3062 if (size >= SIZEOF_VOID_P) {
3063 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3064 MONO_ADD_INS (cfg->cbb, iargs [0]);
3068 /* Those cannot be references since size < sizeof (void*) */
3070 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3071 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3077 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3078 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3084 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3085 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3094 * Emit code to copy a valuetype of type @klass whose address is stored in
3095 * @src->dreg to memory whose address is stored at @dest->dreg.
3098 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3100 MonoInst *iargs [4];
3103 MonoMethod *memcpy_method;
3104 MonoInst *size_ins = NULL;
3105 MonoInst *memcpy_ins = NULL;
3109 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3112 * This check breaks with spilled vars... need to handle it during verification anyway.
3113 * g_assert (klass && klass == src->klass && klass == dest->klass);
3116 if (mini_is_gsharedvt_klass (klass)) {
3118 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3119 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3123 n = mono_class_native_size (klass, &align);
3125 n = mono_class_value_size (klass, &align);
3127 /* if native is true there should be no references in the struct */
3128 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3129 /* Avoid barriers when storing to the stack */
3130 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3131 (dest->opcode == OP_LDADDR))) {
3137 context_used = mini_class_check_context_used (cfg, klass);
3139 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3140 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3142 } else if (context_used) {
3143 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3145 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3146 if (!cfg->compile_aot)
3147 mono_class_compute_gc_descriptor (klass);
3151 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3153 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3158 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3159 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3160 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3165 iargs [2] = size_ins;
3167 EMIT_NEW_ICONST (cfg, iargs [2], n);
3169 memcpy_method = get_memcpy_method ();
3171 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3173 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3178 get_memset_method (void)
3180 static MonoMethod *memset_method = NULL;
3181 if (!memset_method) {
3182 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3184 g_error ("Old corlib found. Install a new one");
3186 return memset_method;
3190 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3192 MonoInst *iargs [3];
3195 MonoMethod *memset_method;
3196 MonoInst *size_ins = NULL;
3197 MonoInst *bzero_ins = NULL;
3198 static MonoMethod *bzero_method;
3200 /* FIXME: Optimize this for the case when dest is an LDADDR */
3201 mono_class_init (klass);
3202 if (mini_is_gsharedvt_klass (klass)) {
3203 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3204 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3206 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3207 g_assert (bzero_method);
3209 iargs [1] = size_ins;
3210 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3214 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3216 n = mono_class_value_size (klass, &align);
3218 if (n <= sizeof (gpointer) * 8) {
3219 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3222 memset_method = get_memset_method ();
3224 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3225 EMIT_NEW_ICONST (cfg, iargs [2], n);
3226 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3233 * Emit IR to return either the this pointer for instance method,
3234 * or the mrgctx for static methods.
3237 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3239 MonoInst *this_ins = NULL;
3241 g_assert (cfg->gshared);
3243 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3244 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3245 !method->klass->valuetype)
3246 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
3248 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3249 MonoInst *mrgctx_loc, *mrgctx_var;
3251 g_assert (!this_ins);
3252 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3254 mrgctx_loc = mono_get_vtable_var (cfg);
3255 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3258 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3259 MonoInst *vtable_loc, *vtable_var;
3261 g_assert (!this_ins);
3263 vtable_loc = mono_get_vtable_var (cfg);
3264 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3266 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3267 MonoInst *mrgctx_var = vtable_var;
3270 vtable_reg = alloc_preg (cfg);
3271 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3272 vtable_var->type = STACK_PTR;
3280 vtable_reg = alloc_preg (cfg);
3281 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3286 static MonoJumpInfoRgctxEntry *
3287 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3289 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3290 res->method = method;
3291 res->in_mrgctx = in_mrgctx;
3292 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3293 res->data->type = patch_type;
3294 res->data->data.target = patch_data;
3295 res->info_type = info_type;
3300 static inline MonoInst*
3301 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3303 MonoInst *args [16];
3306 // FIXME: No fastpath since the slot is not a compile time constant
3308 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3309 if (entry->in_mrgctx)
3310 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3312 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3316 * FIXME: This can be called during decompose, which is a problem since it creates
3318 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3320 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3322 MonoBasicBlock *is_null_bb, *end_bb;
3323 MonoInst *res, *ins, *call;
3326 slot = mini_get_rgctx_entry_slot (entry);
3328 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3329 index = MONO_RGCTX_SLOT_INDEX (slot);
3331 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3332 for (depth = 0; ; ++depth) {
3333 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3335 if (index < size - 1)
3340 NEW_BBLOCK (cfg, end_bb);
3341 NEW_BBLOCK (cfg, is_null_bb);
3344 rgctx_reg = rgctx->dreg;
3346 rgctx_reg = alloc_preg (cfg);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3349 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3350 NEW_BBLOCK (cfg, is_null_bb);
3352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3356 for (i = 0; i < depth; ++i) {
3357 int array_reg = alloc_preg (cfg);
3359 /* load ptr to next array */
3360 if (mrgctx && i == 0)
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3364 rgctx_reg = array_reg;
3365 /* is the ptr null? */
3366 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3367 /* if yes, jump to actual trampoline */
3368 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3372 val_reg = alloc_preg (cfg);
3373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3374 /* is the slot null? */
3375 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3376 /* if yes, jump to actual trampoline */
3377 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3380 res_reg = alloc_preg (cfg);
3381 MONO_INST_NEW (cfg, ins, OP_MOVE);
3382 ins->dreg = res_reg;
3383 ins->sreg1 = val_reg;
3384 MONO_ADD_INS (cfg->cbb, ins);
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3389 MONO_START_BB (cfg, is_null_bb);
3391 EMIT_NEW_ICONST (cfg, args [1], index);
3393 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3395 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3396 MONO_INST_NEW (cfg, ins, OP_MOVE);
3397 ins->dreg = res_reg;
3398 ins->sreg1 = call->dreg;
3399 MONO_ADD_INS (cfg->cbb, ins);
3400 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3402 MONO_START_BB (cfg, end_bb);
3411 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3414 static inline MonoInst*
3415 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3418 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3420 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3424 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3425 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3427 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3428 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3430 return emit_rgctx_fetch (cfg, rgctx, entry);
3434 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3435 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3437 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3438 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3440 return emit_rgctx_fetch (cfg, rgctx, entry);
3444 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3445 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3447 MonoJumpInfoGSharedVtCall *call_info;
3448 MonoJumpInfoRgctxEntry *entry;
3451 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3452 call_info->sig = sig;
3453 call_info->method = cmethod;
3455 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3456 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3458 return emit_rgctx_fetch (cfg, rgctx, entry);
3462 * emit_get_rgctx_virt_method:
3464 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3467 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3468 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3470 MonoJumpInfoVirtMethod *info;
3471 MonoJumpInfoRgctxEntry *entry;
3474 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3475 info->klass = klass;
3476 info->method = virt_method;
3478 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3479 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3481 return emit_rgctx_fetch (cfg, rgctx, entry);
3485 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3486 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3488 MonoJumpInfoRgctxEntry *entry;
3491 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3492 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3494 return emit_rgctx_fetch (cfg, rgctx, entry);
3498 * emit_get_rgctx_method:
3500 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3501 * normal constants, else emit a load from the rgctx.
3504 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3505 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3507 if (!context_used) {
3510 switch (rgctx_type) {
3511 case MONO_RGCTX_INFO_METHOD:
3512 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3514 case MONO_RGCTX_INFO_METHOD_RGCTX:
3515 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3518 g_assert_not_reached ();
3521 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3522 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3524 return emit_rgctx_fetch (cfg, rgctx, entry);
3529 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3530 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3532 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3533 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3535 return emit_rgctx_fetch (cfg, rgctx, entry);
3539 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3541 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3542 MonoRuntimeGenericContextInfoTemplate *template_;
3547 for (i = 0; i < info->num_entries; ++i) {
3548 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3550 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3554 if (info->num_entries == info->count_entries) {
3555 MonoRuntimeGenericContextInfoTemplate *new_entries;
3556 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3558 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3560 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3561 info->entries = new_entries;
3562 info->count_entries = new_count_entries;
3565 idx = info->num_entries;
3566 template_ = &info->entries [idx];
3567 template_->info_type = rgctx_type;
3568 template_->data = data;
3570 info->num_entries ++;
3576 * emit_get_gsharedvt_info:
3578 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3581 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3586 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3587 /* Load info->entries [idx] */
3588 dreg = alloc_preg (cfg);
3589 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3595 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3597 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3601 * On return the caller must check @klass for load errors.
3604 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3606 MonoInst *vtable_arg;
3609 context_used = mini_class_check_context_used (cfg, klass);
3612 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3613 klass, MONO_RGCTX_INFO_VTABLE);
3615 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3619 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3622 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3626 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3627 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3629 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3630 ins->sreg1 = vtable_arg->dreg;
3631 MONO_ADD_INS (cfg->cbb, ins);
3634 MonoBasicBlock *inited_bb;
3635 MonoInst *args [16];
3637 inited_reg = alloc_ireg (cfg);
3639 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3641 NEW_BBLOCK (cfg, inited_bb);
3643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3646 args [0] = vtable_arg;
3647 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3649 MONO_START_BB (cfg, inited_bb);
3654 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3658 if (cfg->gen_seq_points && cfg->method == method) {
3659 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3661 ins->flags |= MONO_INST_NONEMPTY_STACK;
3662 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from OBJ_REG's vtable) and the target KLASS into the JIT TLS structure, so
 * a later InvalidCastException can report "cannot cast FROM to TO".
 * NULL_CHECK guards the load with an is-null branch.  No-op otherwise.
 */
3667 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3669 if (mini_get_debug_options ()->better_cast_details) {
3670 int vtable_reg = alloc_preg (cfg);
3671 int klass_reg = alloc_preg (cfg);
3672 MonoBasicBlock *is_null_bb = NULL;
3674 int to_klass_reg, context_used;
3677 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely for a null receiver. */
3679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3683 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Without a TLS intrinsic the cast-details feature cannot work on this target. */
3685 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3689 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3695 context_used = mini_class_check_context_used (cfg, klass);
3697 MonoInst *class_ins;
/* Shared code: resolve the target class through the RGCTX. */
3699 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3700 to_klass_reg = class_ins->dreg;
3702 to_klass_reg = alloc_preg (cfg);
3703 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
/* class_cast_to = KLASS */
3705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3708 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Counterpart of mini_save_cast_details (): clear the saved cast details in
 * the JIT TLS once the checked cast has succeeded.
 */
3713 mini_reset_cast_details (MonoCompile *cfg)
3715 /* Reset the variables holding the cast details */
3716 if (mini_get_debug_options ()->better_cast_details) {
3717 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3719 MONO_ADD_INS (cfg->cbb, tls_get);
3720 /* It is enough to reset the from field */
3721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks).  The comparison strategy depends on the compilation
 * mode: class pointer via patch constant (shared opt), vtable via RGCTX
 * (gshared), vtable constant (AOT), or raw vtable address (JIT).
 */
3726 * On return the caller must check @array_class for load errors
3729 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3731 int vtable_reg = alloc_preg (cfg);
3734 context_used = mini_class_check_context_used (cfg, array_class);
3736 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: doubles as the null check on OBJ. */
3738 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3740 if (cfg->opt & MONO_OPT_SHARED) {
3741 int class_reg = alloc_preg (cfg);
/* Compare the MonoClass pointers, using a patchable runtime constant. */
3744 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3745 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3746 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3747 } else if (context_used) {
3748 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable through the RGCTX. */
3750 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3753 if (cfg->compile_aot) {
3757 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3759 vt_reg = alloc_preg (cfg);
3760 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3761 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* Plain JIT: the vtable address can be compared as an immediate. */
3764 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3770 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3772 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () on VAL.  In shared generic code the
 * method address comes from the RGCTX and the call is an indirect calli; in
 * non-shared code a direct call is emitted, optionally passing the vtable
 * when method sharing requires it.
 */
3776 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3777 * generic code is generated.
3780 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3782 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3785 MonoInst *rgctx, *addr;
3787 /* FIXME: What if the class is shared? We might not
3788 have to get the address of the method from the
3790 addr = emit_get_rgctx_method (cfg, context_used, method,
3791 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3792 if (cfg->llvm_only) {
/* llvm-only: record the signature so a calli wrapper can be generated. */
3793 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3794 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3796 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3798 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, vtable passed as hidden arg when required. */
3801 gboolean pass_vtable, pass_mrgctx;
3802 MonoInst *rgctx_arg = NULL;
3804 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3805 g_assert (!pass_mrgctx);
3808 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3811 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3814 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IL `unbox` check for SP[0] against KLASS and return an
 * instruction producing the address of the unboxed value (object data just
 * past the MonoObject header).  The element-class comparison goes through the
 * RGCTX in shared generic code, otherwise through mini_emit_class_check ().
 */
3819 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3823 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3824 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3825 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3826 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3828 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3829 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3832 /* FIXME: generics */
3833 g_assert (klass->rank == 0);
/* Arrays can never unbox to a value type: rank must be 0. */
3836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3837 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3839 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3843 MonoInst *element_class;
3845 /* This assertion is from the unboxcast insn */
3846 g_assert (klass->rank == 0);
/* Shared generic code: fetch the expected element class via the RGCTX. */
3848 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3849 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3851 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3852 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3854 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3855 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3856 mini_reset_cast_details (cfg);
/* Result = obj + sizeof (MonoObject): a managed pointer to the value data. */
3859 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3860 MONO_ADD_INS (cfg->cbb, add);
3861 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt (variable-size) type whose concrete
 * layout is only known at runtime.  Emits a three-way branch on the runtime
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE value:
 *   - vtype:    address is obj + sizeof (MonoObject);
 *   - ref:      the reference is spilled to a temporary and its address used;
 *   - nullable: Nullable<T>.Unbox is called indirectly via the RGCTX.
 * All paths converge storing an address into addr_reg, which is then loaded.
 */
3868 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3870 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3871 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3875 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3881 args [1] = klass_inst;
/* Cast check (and unbox-compatible conversion) done out of line in an icall. */
3884 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3886 NEW_BBLOCK (cfg, is_ref_bb);
3887 NEW_BBLOCK (cfg, is_nullable_bb);
3888 NEW_BBLOCK (cfg, end_bb);
3889 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3891 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3894 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3896 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3897 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough: vtype case — data lives just past the object header. */
3901 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3902 MONO_ADD_INS (cfg->cbb, addr);
3904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3907 MONO_START_BB (cfg, is_ref_bb);
3909 /* Save the ref to a temporary */
3910 dreg = alloc_ireg (cfg);
3911 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3912 addr->dreg = addr_reg;
3913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3914 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3917 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through an RGCTX-resolved address. */
3920 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3921 MonoInst *unbox_call;
3922 MonoMethodSignature *unbox_sig;
/* Hand-built signature: T Unbox (object), since the method cannot be looked up at JIT time here. */
3924 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3925 unbox_sig->ret = &klass->byval_arg;
3926 unbox_sig->param_count = 1;
3927 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3930 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3932 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3934 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3935 addr->dreg = addr_reg;
3938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3941 MONO_START_BB (cfg, end_bb);
/* Load the value through the computed address (gsharedvt-aware load). */
3944 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS (FOR_BOX distinguishes box
 * allocations for the GC's specialized allocators).  Chooses between:
 *   - shared generic code: vtable/klass via RGCTX, managed allocator or icall;
 *   - MONO_OPT_SHARED: generic ves_icall_object_new (domain, klass);
 *   - AOT out-of-line corlib case: compact helper keyed by type token;
 *   - default: vtable constant + managed allocator or specialized alloc ftn.
 */
3950 * Returns NULL and set the cfg exception on error.
3953 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3955 MonoInst *iargs [2];
3960 MonoRgctxInfoType rgctx_info;
3961 MonoInst *iargs [2];
/* gsharedvt classes have no compile-time-known instance size. */
3962 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3964 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3966 if (cfg->opt & MONO_OPT_SHARED)
3967 rgctx_info = MONO_RGCTX_INFO_KLASS;
3969 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3970 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3975 alloc_ftn = ves_icall_object_new;
3978 alloc_ftn = ves_icall_object_new_specific;
3981 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3982 if (known_instance_size) {
3983 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
3984 if (size < sizeof (MonoObject))
3985 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3987 EMIT_NEW_ICONST (cfg, iargs [1], size);
3989 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3992 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3995 if (cfg->opt & MONO_OPT_SHARED) {
3996 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3997 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3999 alloc_ftn = ves_icall_object_new;
4000 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4001 /* This happens often in argument checking code, eg. throw new FooException... */
4002 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4003 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4004 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4006 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4007 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
4011 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4012 cfg->exception_ptr = klass;
4016 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4018 if (managed_alloc) {
4019 int size = mono_class_instance_size (klass);
4020 if (size < sizeof (MonoObject))
4021 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4023 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4024 EMIT_NEW_ICONST (cfg, iargs [1], size);
4025 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4027 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in pointer-sized words. */
4029 guint32 lw = vtable->klass->instance_size;
4030 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4031 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4032 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4035 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4039 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS.  Special cases:
 *   - Nullable<T>: call Nullable<T>.Box (directly, or indirectly via RGCTX in
 *     shared code);
 *   - gsharedvt KLASS: runtime branch on CLASS_BOX_TYPE — vtype allocates and
 *     copies, ref just reloads the value, nullable calls Box through a
 *     hand-built signature;
 *   - otherwise: allocate and store the value past the object header.
 */
4043 * Returns NULL and set the cfg exception on error.
4046 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4048 MonoInst *alloc, *ins;
4050 if (mono_class_is_nullable (klass)) {
4051 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4054 if (cfg->llvm_only && cfg->gsharedvt) {
4055 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4056 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4057 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4059 /* FIXME: What if the class is shared? We might not
4060 have to get the method address from the RGCTX. */
4061 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4062 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4063 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4065 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable<T> path: direct call, vtable as hidden arg if needed. */
4068 gboolean pass_vtable, pass_mrgctx;
4069 MonoInst *rgctx_arg = NULL;
4071 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4072 g_assert (!pass_mrgctx);
4075 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4078 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4081 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4085 if (mini_is_gsharedvt_klass (klass)) {
4086 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4087 MonoInst *res, *is_ref, *src_var, *addr;
4090 dreg = alloc_ireg (cfg);
4092 NEW_BBLOCK (cfg, is_ref_bb);
4093 NEW_BBLOCK (cfg, is_nullable_bb);
4094 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type of KLASS. */
4095 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate, then copy the value after the object header. */
4103 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4106 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4107 ins->opcode = OP_STOREV_MEMBASE;
4109 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4110 res->type = STACK_OBJ;
4112 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4115 MONO_START_BB (cfg, is_ref_bb);
4117 /* val is a vtype, so has to load the value manually */
4118 src_var = get_vreg_to_inst (cfg, val->dreg);
4120 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4121 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4122 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4126 MONO_START_BB (cfg, is_nullable_bb);
4129 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4130 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4132 MonoMethodSignature *box_sig;
4135 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4136 * construct that method at JIT time, so have to do things by hand.
4138 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4139 box_sig->ret = &mono_defaults.object_class->byval_arg;
4140 box_sig->param_count = 1;
4141 box_sig->params [0] = &klass->byval_arg;
4144 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4146 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4147 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4148 res->type = STACK_OBJ;
4152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4154 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate and store the value after the header. */
4158 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4162 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4158 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4162 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls may be called directly (see icall_is_direct_callable ()). */
4167 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly (no wrapper).
 * Only whitelisted corlib classes qualify, since a direct call must never
 * reach mono_raise_exception ().
 */
4170 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4172 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4173 if (!direct_icalls_enabled (cfg))
4177 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4178 * Whitelist a few icalls for now.
4180 if (!direct_icall_type_hash) {
4181 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4183 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4184 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4185 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4186 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4187 mono_memory_barrier ();
4188 direct_icall_type_hash = h;
4191 if (cmethod->klass == mono_defaults.math_class)
4193 /* No locking needed */
4194 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD performs a stack walk at runtime (here:
 * System.Type.GetType), which constrains how it may be inlined/called.
 * NOTE(review): additional checks may follow in lines elided from this view.
 */
4200 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4202 if (cmethod->klass == mono_defaults.systemtype_class) {
4203 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit inlined IR for Enum.HasFlag: load the enum value, AND it with the
 * flag, and compare the result against the flag ((v & f) == f).  Uses 32- or
 * 64-bit ops depending on the enum's underlying type.
 */
4209 static G_GNUC_UNUSED MonoInst*
4210 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4212 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4213 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4216 switch (enum_type->type) {
4219 #if SIZEOF_REGISTER == 8
4231 MonoInst *load, *and_, *cmp, *ceq;
4232 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4233 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4234 int dest_reg = alloc_ireg (cfg);
4236 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4237 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4238 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4239 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4241 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that lack them. */
4244 load = mono_decompose_opcode (cfg, load);
4245 and_ = mono_decompose_opcode (cfg, and_);
4246 cmp = mono_decompose_opcode (cfg, cmp);
4247 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR equivalent to mono_delegate_ctor (): allocate the
 * delegate of KLASS, fill in target/method/method_code, and install the
 * invoke trampoline.  VIRTUAL_ selects the virtual-delegate variant; the
 * llvm-only backend instead calls an init icall.
 */
4255 * Returns NULL and set the cfg exception on error.
4257 static G_GNUC_UNUSED MonoInst*
4258 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4262 gpointer trampoline;
4263 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke impl; bail out if unavailable. */
4267 if (virtual_ && !cfg->llvm_only) {
4268 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4271 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4275 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4279 /* Inline the contents of mono_delegate_ctor */
4281 /* Set target field */
4282 /* Optimize away setting of NULL target */
4283 if (!MONO_INS_IS_PCONST_NULL (target)) {
4284 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: GC write barrier required. */
4285 if (cfg->gen_write_barriers) {
4286 dreg = alloc_preg (cfg);
4287 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4288 emit_write_barrier (cfg, ptr, target);
4292 /* Set method field */
4293 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4294 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4297 * To avoid looking up the compiled code belonging to the target method
4298 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4299 * store it, and we fill it after the method has been compiled.
4301 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4302 MonoInst *code_slot_ins;
4305 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4307 domain = mono_domain_get ();
/* The per-domain method->code-slot map is created and filled under the domain lock. */
4308 mono_domain_lock (domain);
4309 if (!domain_jit_info (domain)->method_code_hash)
4310 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4311 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4313 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4314 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4316 mono_domain_unlock (domain);
4318 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4320 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialization is done by a runtime icall instead of trampolines. */
4323 if (cfg->llvm_only) {
4324 MonoInst *args [16];
4329 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4330 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4333 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4339 if (cfg->compile_aot) {
4340 MonoDelegateClassMethodPair *del_tramp;
/* AOT: describe the (klass, method, virtual) triple via a patch constant. */
4342 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4343 del_tramp->klass = klass;
4344 del_tramp->method = context_used ? NULL : method;
4345 del_tramp->is_virtual = virtual_;
4346 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4349 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4351 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4352 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4355 /* Set invoke_impl field */
4357 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Trampoline-info case: invoke_impl/method_ptr are loaded from the info struct. */
4359 dreg = alloc_preg (cfg);
4360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4363 dreg = alloc_preg (cfg);
4364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4368 dreg = alloc_preg (cfg);
4369 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4372 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing `newarr`/multi-dimensional array creation of
 * rank RANK with the dimension arguments in SP.  Uses the vararg
 * mono_array_new_va icall, which disables LLVM compilation for this method.
 */
4378 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4380 MonoJitICallInfo *info;
4382 /* Need to register the icall so it gets an icall wrapper */
4383 info = mono_get_array_new_va_icall (rank);
4385 cfg->flags |= MONO_CFG_HAS_VARARGS;
4387 /* mono_array_new_va () needs a vararg calling convention */
4388 cfg->exception_message = g_strdup ("array-new");
4389 cfg->disable_llvm = TRUE;
4391 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4392 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * Handle `constrained.` calls whose receiver is a gsharedvt type: the ref/
 * vtype decision must be made at runtime, so the supported simple cases are
 * routed through the mono_gsharedvt_constrained_call icall with the args
 * packed into a localloc'd array; other shapes trigger GSHAREDVT_FAILURE.
 */
4396 * handle_constrained_gsharedvt_call:
4398 * Handle constrained calls where the receiver is a gsharedvt type.
4399 * Return the instruction representing the call. Set the cfg exception on failure.
4402 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4403 gboolean *ref_emit_widen)
4405 MonoInst *ins = NULL;
4406 gboolean emit_widen = *ref_emit_widen;
4409 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4410 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4411 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Shape filter: receiver class, return type and (at most one) parameter must all be representable by the icall. */
4413 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4414 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
4415 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4416 MonoInst *args [16];
4419 * This case handles calls to
4420 * - object:ToString()/Equals()/GetHashCode(),
4421 * - System.IComparable<T>:CompareTo()
4422 * - System.IEquatable<T>:Equals ()
4423 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (via RGCTX when generic), args [2] = constrained class. */
4427 if (mono_method_check_context_used (cmethod))
4428 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4430 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4431 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4433 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4434 if (fsig->hasthis && fsig->param_count) {
4435 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4436 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4437 ins->dreg = alloc_preg (cfg);
4438 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4439 MONO_ADD_INS (cfg->cbb, ins);
4442 if (mini_is_gsharedvt_type (fsig->params [0])) {
4443 int addr_reg, deref_arg_reg;
/* args [3] tells the icall whether it must dereference the packed argument. */
4445 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4446 deref_arg_reg = alloc_preg (cfg);
4447 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4448 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4450 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4451 addr_reg = ins->dreg;
4452 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4454 EMIT_NEW_ICONST (cfg, args [3], 0);
4455 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4458 EMIT_NEW_ICONST (cfg, args [3], 0);
4459 EMIT_NEW_ICONST (cfg, args [4], 0);
4461 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap it to match fsig->ret. */
4464 if (mini_is_gsharedvt_type (fsig->ret)) {
4465 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4466 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
4470 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4471 MONO_ADD_INS (cfg->cbb, add);
4473 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4474 MONO_ADD_INS (cfg->cbb, ins);
4475 /* ins represents the call result */
4478 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4481 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction which materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry basic block,
 * plus a dummy use in the exit block to keep the variable alive.
 * No-op when there is no got_var or it was already allocated.
 */
4490 mono_emit_load_got_addr (MonoCompile *cfg)
4492 MonoInst *getaddr, *dummy_use;
4494 if (!cfg->got_var || cfg->got_var_allocated)
4497 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4498 getaddr->cil_code = cfg->header->code;
4499 getaddr->dreg = cfg->got_var->dreg;
4501 /* Add it to the start of the first bblock */
4502 if (cfg->bb_entry->code) {
4503 getaddr->next = cfg->bb_entry->code;
4504 cfg->bb_entry->code = getaddr;
4507 MONO_ADD_INS (cfg->bb_entry, getaddr);
4509 cfg->got_var_allocated = TRUE;
4512 * Add a dummy use to keep the got_var alive, since real uses might
4513 * only be generated by the back ends.
4514 * Add it to end_bblock, so the variable's lifetime covers the whole
4516 * It would be better to make the usage of the got var explicit in all
4517 * cases when the backend needs it (i.e. calls, throw etc.), so this
4518 * wouldn't be needed.
4520 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4521 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size for inlining; lazily initialized from MONO_INLINELIMIT (or INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
4524 static int inline_limit;
4525 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: explicit NoInlining, synchronized methods, MarshalByRef classes,
 * too-deep inline nesting, bodies over the (env-configurable) size limit,
 * classes whose cctor cannot be safely run/deferred, and (on soft-float
 * targets) R4 parameters/returns.  AggressiveInlining overrides the size
 * limit and forces the cctor to run when possible.
 */
4528 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4530 MonoMethodHeaderSummary header;
4532 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4533 MonoMethodSignature *sig = mono_method_signature (method);
4537 if (cfg->disable_inline)
4542 if (cfg->inline_depth > 10)
4545 if (!mono_method_get_header_summary (method, &header))
4548 /*runtime, icall and pinvoke are checked by summary call*/
4549 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4550 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4551 (mono_class_is_marshalbyref (method->klass)) ||
4555 /* also consider num_locals? */
4556 /* Do the size check early to avoid creating vtables */
4557 if (!inline_limit_inited) {
4558 if (g_getenv ("MONO_INLINELIMIT"))
4559 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4561 inline_limit = INLINE_LENGTH_LIMIT;
4562 inline_limit_inited = TRUE;
4564 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4568 * if we can initialize the class of the method right away, we do,
4569 * otherwise we don't allow inlining if the class needs initialization,
4570 * since it would mean inserting a call to mono_runtime_class_init()
4571 * inside the inlined code
4573 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4576 if (!(cfg->opt & MONO_OPT_SHARED)) {
4577 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4578 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4579 if (method->klass->has_cctor) {
4580 vtable = mono_class_vtable (cfg->domain, method->klass);
4583 if (!cfg->compile_aot) {
4585 if (!mono_runtime_class_init_full (vtable, &error)) {
/* cctor failed: swallow the error here, simply refuse to inline. */
4586 mono_error_cleanup (&error);
4591 } else if (mono_class_is_before_field_init (method->klass)) {
4592 if (cfg->run_cctors && method->klass->has_cctor) {
4593 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4594 if (!method->klass->runtime_info)
4595 /* No vtable created yet */
4597 vtable = mono_class_vtable (cfg->domain, method->klass);
4600 /* This makes so that inline cannot trigger */
4601 /* .cctors: too many apps depend on them */
4602 /* running with a specific order... */
4603 if (! vtable->initialized)
4606 if (!mono_runtime_class_init_full (vtable, &error)) {
4607 mono_error_cleanup (&error);
4611 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4612 if (!method->klass->runtime_info)
4613 /* No vtable created yet */
4615 vtable = mono_class_vtable (cfg->domain, method->klass);
4618 if (!vtable->initialized)
4623 * If we're compiling for shared code
4624 * the cctor will need to be run at aot method load time, for example,
4625 * or at the end of the compilation of the inlining method.
4627 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4631 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods taking or returning R4. */
4632 if (mono_arch_is_soft_float ()) {
4634 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4636 for (i = 0; i < sig->param_count; ++i)
4637 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4642 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access to KLASS from METHOD requires
 * emitting a class-init check (i.e. the cctor may not have run yet at the
 * point of access).
 */
4649 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4651 if (!cfg->compile_aot) {
4653 if (vtable->initialized)
4657 if (mono_class_is_before_field_init (klass)) {
4658 if (cfg->method == method)
4662 if (!mono_class_needs_cctor_run (klass, method))
4665 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4666 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of single-dimensional
 * array ARR with element type KLASS: arr + index * element_size +
 * offsetof (MonoArray, vector).  BCHECK adds a bounds check; gsharedvt
 * element sizes are fetched from the RGCTX at runtime; x86/amd64 use a
 * single LEA for power-of-two sizes.
 */
4673 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4677 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4680 if (mini_is_gsharedvt_variable_klass (klass)) {
4683 mono_class_init (klass);
4684 size = mono_class_array_element_size (klass);
4687 mult_reg = alloc_preg (cfg);
4688 array_reg = arr->dreg;
4689 index_reg = index->dreg;
4691 #if SIZEOF_REGISTER == 8
4692 /* The array reg is 64 bits but the index reg is only 32 */
4693 if (COMPILE_LLVM (cfg)) {
4695 index2_reg = index_reg;
4697 index2_reg = alloc_preg (cfg);
4698 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4701 if (index->type == STACK_I8) {
4702 index2_reg = alloc_preg (cfg);
4703 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4705 index2_reg = index_reg;
4710 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4712 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element size: fold scale and offset into one LEA. */
4713 if (size == 1 || size == 2 || size == 4 || size == 8) {
4714 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4716 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4717 ins->klass = mono_class_get_element_class (klass);
4718 ins->type = STACK_MP;
4724 add_reg = alloc_ireg_mp (cfg);
4727 MonoInst *rgctx_ins;
/* gsharedvt: element size only known at runtime, via the RGCTX. */
4730 g_assert (cfg->gshared);
4731 context_used = mini_class_check_context_used (cfg, klass);
4732 g_assert (context_used);
4733 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4734 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4738 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4739 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4740 ins->klass = mono_class_get_element_class (klass);
4741 ins->type = STACK_MP;
4742 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS.  Both dimensions are
 * range-checked against the MonoArrayBounds records (lower_bound/length);
 * out-of-range indexes raise IndexOutOfRangeException.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
4748 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4750 int bounds_reg = alloc_preg (cfg);
4751 int add_reg = alloc_ireg_mp (cfg);
4752 int mult_reg = alloc_preg (cfg);
4753 int mult2_reg = alloc_preg (cfg);
4754 int low1_reg = alloc_preg (cfg);
4755 int low2_reg = alloc_preg (cfg);
4756 int high1_reg = alloc_preg (cfg);
4757 int high2_reg = alloc_preg (cfg);
4758 int realidx1_reg = alloc_preg (cfg);
4759 int realidx2_reg = alloc_preg (cfg);
4760 int sum_reg = alloc_preg (cfg);
4761 int index1, index2, tmpreg;
4765 mono_class_init (klass);
4766 size = mono_class_array_element_size (klass);
4768 index1 = index_ins1->dreg;
4769 index2 = index_ins2->dreg;
4771 #if SIZEOF_REGISTER == 8
4772 /* The array reg is 64 bits but the index reg is only 32 */
4773 if (COMPILE_LLVM (cfg)) {
4776 tmpreg = alloc_preg (cfg);
4777 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4779 tmpreg = alloc_preg (cfg);
4780 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4784 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4788 /* range checking */
/* Load the MonoArrayBounds vector pointer from the array object. */
4789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4790 		arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; require realidx1 < length. */
4792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4793 		bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4794 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4795 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4796 		bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
/* Unsigned compare also catches negative realidx (wraps to huge value). */
4797 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4798 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds record, sizeof () bytes further on. */
4800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4801 		bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4802 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4804 		bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4805 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4806 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1*len2 + realidx2) * size + offsetof (vector). */
4808 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4809 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4811 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4812 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4814 ins->type = STACK_MP;
4816 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set accessor
 * CMETHOD.  Rank 1 uses the inline single-dimension path; rank 2 uses the
 * inline two-dimension path when OP_LMUL is available and intrinsics are
 * enabled; otherwise a call to the marshal-generated Address helper is
 * emitted.  IS_SET excludes the trailing value argument from the rank count.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
4822 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4826 MonoMethod *addr_method;
4828 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the last parameter is the value, not an index. */
4830 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4833 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4835 /* emit_ldelema_2 depends on OP_LMUL */
4836 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4837 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4840 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the runtime-generated Address helper for this rank. */
4843 element_size = mono_class_array_element_size (eclass);
4844 addr_method = mono_marshal_get_array_address (rank, element_size);
4845 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint, regardless of method. */
4850 static MonoBreakPolicy
4851 always_insert_breakpoint (MonoMethod *method)
4853 	return MONO_BREAK_POLICY_ALWAYS;
/* Currently active break policy callback; replaced via mono_set_break_policy (). */
4856 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4859 * mono_set_break_policy:
4860 * policy_callback: the new callback function
4862 * Allow embedders to decide whether to actually obey breakpoint instructions
4863 * (both break IL instructions and Debugger.Break () method calls), for example
4864 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4865 * untrusted or semi-trusted code.
4867 * @policy_callback will be called every time a break point instruction needs to
4868 * be inserted with the method argument being the method that calls Debugger.Break()
4869 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4870 * if it wants the breakpoint to not be effective in the given method.
4871 * #MONO_BREAK_POLICY_ALWAYS is the default.
4874 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A non-NULL callback replaces the policy; NULL restores the default
 * always-break behavior. */
4876 if (policy_callback)
4877 break_policy_func = policy_callback;
4879 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:   (sic — historical typo, kept for in-file callers)
 *
 *   Ask the embedder-installed break policy whether a breakpoint for METHOD
 * should actually be emitted.  Unknown return values are flagged with a
 * warning (fallback return is on a line elided in this excerpt).
 */
4883 should_insert_brekpoint (MonoMethod *method) {
4884 	switch (break_policy_func (method)) {
4885 	case MONO_BREAK_POLICY_ALWAYS:
4887 	case MONO_BREAK_POLICY_NEVER:
4889 	case MONO_BREAK_POLICY_ON_DBG:
4890 		g_warning ("mdb no longer supported");
4893 		g_warning ("Incorrect value returned from break policy callback");
4898 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4900 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* args [0] = array, args [1] = index, args [2] = value address;
 * IS_SET selects store-into-array vs. load-out-of-array. */
4902 MonoInst *addr, *store, *load;
4903 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4905 /* the bounds check is already done by the callers */
4906 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the array slot, with a write barrier for refs. */
4908 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4909 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4910 if (mini_type_is_reference (&eklass->byval_arg))
4911 emit_write_barrier (cfg, addr, load);
/* Get: copy the array slot out to *args [2]. */
4913 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4914 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type (after resolving generic sharing). */
4921 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4923 	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into array sp [0] at index sp [1].
 * With SAFETY_CHECKS and a (possibly) non-null reference element, the
 * covariance-checking virtual stelemref helper is called; otherwise the
 * element address is computed inline and a plain store (plus write barrier
 * for reference elements) is emitted.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
4927 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a known-null reference needs no covariance check. */
4929 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4930 		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4931 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4932 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4933 MonoInst *iargs [3];
4936 mono_class_setup_vtable (obj_array);
4937 g_assert (helper->slot);
4939 if (sp [0]->type != STACK_OBJ)
4941 if (sp [2]->type != STACK_OBJ)
/* Virtual call so the helper dispatches on the actual array type. */
4948 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use ldelema + STOREV. */
4952 if (mini_is_gsharedvt_variable_klass (klass)) {
4955 // FIXME-VT: OP_ICONST optimization
4956 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4957 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4958 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset into the store itself. */
4959 } else if (sp [1]->opcode == OP_ICONST) {
4960 int array_reg = sp [0]->dreg;
4961 int index_reg = sp [1]->dreg;
4962 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4964 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4965 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4968 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4969 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the address, store, and barrier reference writes. */
4971 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4972 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4973 if (generic_class_is_reference_type (cfg, klass))
4974 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: element access with no
 * type-safety checks.  IS_SET selects the element class from the value
 * parameter (store) or the return type (load).
 * NOTE(review): some interior lines are elided in this excerpt.
 */
4981 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4986 eklass = mono_class_from_mono_type (fsig->params [2]);
4988 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path: reuse emit_array_store with safety checks disabled. */
4991 return emit_array_store (cfg, eklass, args, FALSE);
/* Load path: address the element (no bounds check) and load it. */
4993 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4994 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may reinterpret a
 * value of PARAM_KLASS as RETURN_KLASS without conversion.  Compatible
 * pairs: both reference types, or blittable value types of equal size, or
 * non-struct value types that both fit the same register-size class.
 * Floats/doubles and reference/value mixes are rejected.
 *   Fix: four occurrences of "&param_klass" had been corrupted into
 * "¶m_klass" by an HTML-entity mangling ("&para" -> U+00B6); restored.
 * NOTE(review): some interior lines (returns/braces) are elided in this
 * excerpt.
 */
5000 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5003 int param_size, return_size;
/* Resolve enums/gshared wrappers to their underlying classes first. */
5005 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5006 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5008 if (cfg->verbose_level > 3)
5009 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5011 //Don't allow mixing reference types with value types
5012 if (param_klass->valuetype != return_klass->valuetype) {
5013 if (cfg->verbose_level > 3)
5014 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* Both reference types: a move is always representation-compatible. */
5018 if (!param_klass->valuetype) {
5019 if (cfg->verbose_level > 3)
5020 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Value types must be blittable (no GC references inside). */
5025 if (param_klass->has_references || return_klass->has_references)
5028 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5029 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5030 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5031 if (cfg->verbose_level > 3)
5032 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floating point lives in different registers; reject it outright. */
5036 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5037 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5038 if (cfg->verbose_level > 3)
5039 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5043 param_size = mono_class_value_size (param_klass, &align);
5044 return_size = mono_class_value_size (return_klass, &align);
5046 //We can do it if sizes match
5047 if (param_size == return_size) {
5048 if (cfg->verbose_level > 3)
5049 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5053 //No simple way to handle struct if sizes don't match
5054 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5055 if (cfg->verbose_level > 3)
5056 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5061 * Same reg size category.
5062 * A quick note on why we don't require widening here.
5063 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5065 * Since the source value comes from a function argument, the JIT will already have
5066 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5068 if (param_size <= 4 && return_size <= 4) {
5069 if (cfg->verbose_level > 3)
5070 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic for Array.UnsafeMov<S,R>: reinterpret args [0] as the return
 * type when is_unsafe_mov_compatible () approves the pair, either directly
 * or element-wise for rank-1 arrays.  gsharedvt return types bail out.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
5078 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5080 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5081 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5083 if (mini_is_gsharedvt_variable_type (fsig->ret))
5086 //Valuetypes that are semantically equivalent or numbers than can be widened to
5087 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5090 //Arrays of valuetypes that are semantically equivalent
5091 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD ctors first
 * (when MONO_OPT_SIMD is enabled on a SIMD-capable arch), then native-type
 * ctors.  Returns NULL (on lines elided here) when no intrinsic applies.
 */
5098 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5100 #ifdef MONO_ARCH_SIMD_INTRINSICS
5101 MonoInst *ins = NULL;
5103 if (cfg->opt & MONO_OPT_SIMD) {
5104 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5110 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (a
 * MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5114 emit_memory_barrier (MonoCompile *cfg, int kind)
5116 MonoInst *ins = NULL;
5117 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5118 MONO_ADD_INS (cfg->cbb, ins);
5119 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only for the LLVM backend: System.Math Sin/Cos/Sqrt/
 * Abs(double) as unary float ops, and Min/Max over I4/U4/I8/U8 as CMOV-style
 * binary ops when MONO_OPT_CMOV is enabled.  Returns NULL (on lines elided
 * here) when the method matches no intrinsic.
 */
5125 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5127 MonoInst *ins = NULL;
5130 /* The LLVM backend supports these intrinsics */
5131 if (cmethod->klass == mono_defaults.math_class) {
5132 if (strcmp (cmethod->name, "Sin") == 0) {
5134 } else if (strcmp (cmethod->name, "Cos") == 0) {
5136 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5138 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one R8 argument, R8 result. */
5142 if (opcode && fsig->param_count == 1) {
5143 MONO_INST_NEW (cfg, ins, opcode);
5144 ins->type = STACK_R8;
5145 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5146 ins->sreg1 = args [0]->dreg;
5147 MONO_ADD_INS (cfg->cbb, ins);
5151 if (cfg->opt & MONO_OPT_CMOV) {
5152 if (strcmp (cmethod->name, "Min") == 0) {
5153 if (fsig->params [0]->type == MONO_TYPE_I4)
5155 if (fsig->params [0]->type == MONO_TYPE_U4)
5156 opcode = OP_IMIN_UN;
5157 else if (fsig->params [0]->type == MONO_TYPE_I8)
5159 else if (fsig->params [0]->type == MONO_TYPE_U8)
5160 opcode = OP_LMIN_UN;
5161 } else if (strcmp (cmethod->name, "Max") == 0) {
5162 if (fsig->params [0]->type == MONO_TYPE_I4)
5164 if (fsig->params [0]->type == MONO_TYPE_U4)
5165 opcode = OP_IMAX_UN;
5166 else if (fsig->params [0]->type == MONO_TYPE_I8)
5168 else if (fsig->params [0]->type == MONO_TYPE_U8)
5169 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic; stack type follows the operand width. */
5173 if (opcode && fsig->param_count == 2) {
5174 MONO_INST_NEW (cfg, ins, opcode);
5175 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5176 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5177 ins->sreg1 = args [0]->dreg;
5178 ins->sreg2 = args [1]->dreg;
5179 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in generic-sharing-compatible code:
 * the internal Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.  Returns
 * NULL (on lines elided here) for anything else.
 */
5187 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5189 if (cmethod->klass == mono_defaults.array_class) {
5190 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5191 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5192 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5193 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5194 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5195 return emit_array_unsafe_mov (cfg, fsig, args);
5202 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5204 MonoInst *ins = NULL;
5206 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5208 if (cmethod->klass == mono_defaults.string_class) {
5209 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5210 int dreg = alloc_ireg (cfg);
5211 int index_reg = alloc_preg (cfg);
5212 int add_reg = alloc_preg (cfg);
5214 #if SIZEOF_REGISTER == 8
5215 if (COMPILE_LLVM (cfg)) {
5216 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5218 /* The array reg is 64 bits but the index reg is only 32 */
5219 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5222 index_reg = args [1]->dreg;
5224 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5226 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5227 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5228 add_reg = ins->dreg;
5229 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5232 int mult_reg = alloc_preg (cfg);
5233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5234 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5235 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5236 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5238 type_from_op (cfg, ins, NULL, NULL);
5240 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5241 int dreg = alloc_ireg (cfg);
5242 /* Decompose later to allow more optimizations */
5243 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5244 ins->type = STACK_I4;
5245 ins->flags |= MONO_INST_FAULT;
5246 cfg->cbb->has_array_access = TRUE;
5247 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5252 } else if (cmethod->klass == mono_defaults.object_class) {
5253 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5254 int dreg = alloc_ireg_ref (cfg);
5255 int vt_reg = alloc_preg (cfg);
5256 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5257 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5258 type_from_op (cfg, ins, NULL, NULL);
5261 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5262 int dreg = alloc_ireg (cfg);
5263 int t1 = alloc_ireg (cfg);
5265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5266 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5267 ins->type = STACK_I4;
5270 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5271 MONO_INST_NEW (cfg, ins, OP_NOP);
5272 MONO_ADD_INS (cfg->cbb, ins);
5276 } else if (cmethod->klass == mono_defaults.array_class) {
5277 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5278 return emit_array_generic_access (cfg, fsig, args, FALSE);
5279 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5280 return emit_array_generic_access (cfg, fsig, args, TRUE);
5282 #ifndef MONO_BIG_ARRAYS
5284 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5287 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5288 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5289 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5290 int dreg = alloc_ireg (cfg);
5291 int bounds_reg = alloc_ireg_mp (cfg);
5292 MonoBasicBlock *end_bb, *szarray_bb;
5293 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5295 NEW_BBLOCK (cfg, end_bb);
5296 NEW_BBLOCK (cfg, szarray_bb);
5298 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5299 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5300 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5302 /* Non-szarray case */
5304 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5305 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5307 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5308 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5309 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5310 MONO_START_BB (cfg, szarray_bb);
5313 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5314 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5316 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5317 MONO_START_BB (cfg, end_bb);
5319 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5320 ins->type = STACK_I4;
5326 if (cmethod->name [0] != 'g')
5329 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5330 int dreg = alloc_ireg (cfg);
5331 int vtable_reg = alloc_preg (cfg);
5332 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5333 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5334 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5335 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5336 type_from_op (cfg, ins, NULL, NULL);
5339 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5340 int dreg = alloc_ireg (cfg);
5342 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5343 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5344 type_from_op (cfg, ins, NULL, NULL);
5349 } else if (cmethod->klass == runtime_helpers_class) {
5350 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5351 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5355 } else if (cmethod->klass == mono_defaults.monitor_class) {
5356 gboolean is_enter = FALSE;
5357 gboolean is_v4 = FALSE;
5359 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5363 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5368 * To make async stack traces work, icalls which can block should have a wrapper.
5369 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5371 MonoBasicBlock *end_bb;
5373 NEW_BBLOCK (cfg, end_bb);
5375 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5376 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5377 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5378 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
5379 MONO_START_BB (cfg, end_bb);
5382 } else if (cmethod->klass == mono_defaults.thread_class) {
5383 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5384 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5385 MONO_ADD_INS (cfg->cbb, ins);
5387 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5388 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5389 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5391 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5393 if (fsig->params [0]->type == MONO_TYPE_I1)
5394 opcode = OP_LOADI1_MEMBASE;
5395 else if (fsig->params [0]->type == MONO_TYPE_U1)
5396 opcode = OP_LOADU1_MEMBASE;
5397 else if (fsig->params [0]->type == MONO_TYPE_I2)
5398 opcode = OP_LOADI2_MEMBASE;
5399 else if (fsig->params [0]->type == MONO_TYPE_U2)
5400 opcode = OP_LOADU2_MEMBASE;
5401 else if (fsig->params [0]->type == MONO_TYPE_I4)
5402 opcode = OP_LOADI4_MEMBASE;
5403 else if (fsig->params [0]->type == MONO_TYPE_U4)
5404 opcode = OP_LOADU4_MEMBASE;
5405 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5406 opcode = OP_LOADI8_MEMBASE;
5407 else if (fsig->params [0]->type == MONO_TYPE_R4)
5408 opcode = OP_LOADR4_MEMBASE;
5409 else if (fsig->params [0]->type == MONO_TYPE_R8)
5410 opcode = OP_LOADR8_MEMBASE;
5411 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5412 opcode = OP_LOAD_MEMBASE;
5415 MONO_INST_NEW (cfg, ins, opcode);
5416 ins->inst_basereg = args [0]->dreg;
5417 ins->inst_offset = 0;
5418 MONO_ADD_INS (cfg->cbb, ins);
5420 switch (fsig->params [0]->type) {
5427 ins->dreg = mono_alloc_ireg (cfg);
5428 ins->type = STACK_I4;
5432 ins->dreg = mono_alloc_lreg (cfg);
5433 ins->type = STACK_I8;
5437 ins->dreg = mono_alloc_ireg (cfg);
5438 #if SIZEOF_REGISTER == 8
5439 ins->type = STACK_I8;
5441 ins->type = STACK_I4;
5446 ins->dreg = mono_alloc_freg (cfg);
5447 ins->type = STACK_R8;
5450 g_assert (mini_type_is_reference (fsig->params [0]));
5451 ins->dreg = mono_alloc_ireg_ref (cfg);
5452 ins->type = STACK_OBJ;
5456 if (opcode == OP_LOADI8_MEMBASE)
5457 ins = mono_decompose_opcode (cfg, ins);
5459 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5463 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5465 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5467 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5468 opcode = OP_STOREI1_MEMBASE_REG;
5469 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5470 opcode = OP_STOREI2_MEMBASE_REG;
5471 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5472 opcode = OP_STOREI4_MEMBASE_REG;
5473 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5474 opcode = OP_STOREI8_MEMBASE_REG;
5475 else if (fsig->params [0]->type == MONO_TYPE_R4)
5476 opcode = OP_STORER4_MEMBASE_REG;
5477 else if (fsig->params [0]->type == MONO_TYPE_R8)
5478 opcode = OP_STORER8_MEMBASE_REG;
5479 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5480 opcode = OP_STORE_MEMBASE_REG;
5483 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5485 MONO_INST_NEW (cfg, ins, opcode);
5486 ins->sreg1 = args [1]->dreg;
5487 ins->inst_destbasereg = args [0]->dreg;
5488 ins->inst_offset = 0;
5489 MONO_ADD_INS (cfg->cbb, ins);
5491 if (opcode == OP_STOREI8_MEMBASE_REG)
5492 ins = mono_decompose_opcode (cfg, ins);
5497 } else if (cmethod->klass->image == mono_defaults.corlib &&
5498 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5499 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5502 #if SIZEOF_REGISTER == 8
5503 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5504 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5505 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5506 ins->dreg = mono_alloc_preg (cfg);
5507 ins->sreg1 = args [0]->dreg;
5508 ins->type = STACK_I8;
5509 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5510 MONO_ADD_INS (cfg->cbb, ins);
5514 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5516 /* 64 bit reads are already atomic */
5517 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5518 load_ins->dreg = mono_alloc_preg (cfg);
5519 load_ins->inst_basereg = args [0]->dreg;
5520 load_ins->inst_offset = 0;
5521 load_ins->type = STACK_I8;
5522 MONO_ADD_INS (cfg->cbb, load_ins);
5524 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5531 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5532 MonoInst *ins_iconst;
5535 if (fsig->params [0]->type == MONO_TYPE_I4) {
5536 opcode = OP_ATOMIC_ADD_I4;
5537 cfg->has_atomic_add_i4 = TRUE;
5539 #if SIZEOF_REGISTER == 8
5540 else if (fsig->params [0]->type == MONO_TYPE_I8)
5541 opcode = OP_ATOMIC_ADD_I8;
5544 if (!mono_arch_opcode_supported (opcode))
5546 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5547 ins_iconst->inst_c0 = 1;
5548 ins_iconst->dreg = mono_alloc_ireg (cfg);
5549 MONO_ADD_INS (cfg->cbb, ins_iconst);
5551 MONO_INST_NEW (cfg, ins, opcode);
5552 ins->dreg = mono_alloc_ireg (cfg);
5553 ins->inst_basereg = args [0]->dreg;
5554 ins->inst_offset = 0;
5555 ins->sreg2 = ins_iconst->dreg;
5556 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5557 MONO_ADD_INS (cfg->cbb, ins);
5559 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5560 MonoInst *ins_iconst;
5563 if (fsig->params [0]->type == MONO_TYPE_I4) {
5564 opcode = OP_ATOMIC_ADD_I4;
5565 cfg->has_atomic_add_i4 = TRUE;
5567 #if SIZEOF_REGISTER == 8
5568 else if (fsig->params [0]->type == MONO_TYPE_I8)
5569 opcode = OP_ATOMIC_ADD_I8;
5572 if (!mono_arch_opcode_supported (opcode))
5574 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5575 ins_iconst->inst_c0 = -1;
5576 ins_iconst->dreg = mono_alloc_ireg (cfg);
5577 MONO_ADD_INS (cfg->cbb, ins_iconst);
5579 MONO_INST_NEW (cfg, ins, opcode);
5580 ins->dreg = mono_alloc_ireg (cfg);
5581 ins->inst_basereg = args [0]->dreg;
5582 ins->inst_offset = 0;
5583 ins->sreg2 = ins_iconst->dreg;
5584 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5585 MONO_ADD_INS (cfg->cbb, ins);
5587 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5590 if (fsig->params [0]->type == MONO_TYPE_I4) {
5591 opcode = OP_ATOMIC_ADD_I4;
5592 cfg->has_atomic_add_i4 = TRUE;
5594 #if SIZEOF_REGISTER == 8
5595 else if (fsig->params [0]->type == MONO_TYPE_I8)
5596 opcode = OP_ATOMIC_ADD_I8;
5599 if (!mono_arch_opcode_supported (opcode))
5601 MONO_INST_NEW (cfg, ins, opcode);
5602 ins->dreg = mono_alloc_ireg (cfg);
5603 ins->inst_basereg = args [0]->dreg;
5604 ins->inst_offset = 0;
5605 ins->sreg2 = args [1]->dreg;
5606 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5607 MONO_ADD_INS (cfg->cbb, ins);
5610 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5611 MonoInst *f2i = NULL, *i2f;
5612 guint32 opcode, f2i_opcode, i2f_opcode;
5613 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5614 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5616 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5617 fsig->params [0]->type == MONO_TYPE_R4) {
5618 opcode = OP_ATOMIC_EXCHANGE_I4;
5619 f2i_opcode = OP_MOVE_F_TO_I4;
5620 i2f_opcode = OP_MOVE_I4_TO_F;
5621 cfg->has_atomic_exchange_i4 = TRUE;
5623 #if SIZEOF_REGISTER == 8
5625 fsig->params [0]->type == MONO_TYPE_I8 ||
5626 fsig->params [0]->type == MONO_TYPE_R8 ||
5627 fsig->params [0]->type == MONO_TYPE_I) {
5628 opcode = OP_ATOMIC_EXCHANGE_I8;
5629 f2i_opcode = OP_MOVE_F_TO_I8;
5630 i2f_opcode = OP_MOVE_I8_TO_F;
5633 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5634 opcode = OP_ATOMIC_EXCHANGE_I4;
5635 cfg->has_atomic_exchange_i4 = TRUE;
5641 if (!mono_arch_opcode_supported (opcode))
5645 /* TODO: Decompose these opcodes instead of bailing here. */
5646 if (COMPILE_SOFT_FLOAT (cfg))
5649 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5650 f2i->dreg = mono_alloc_ireg (cfg);
5651 f2i->sreg1 = args [1]->dreg;
5652 if (f2i_opcode == OP_MOVE_F_TO_I4)
5653 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5654 MONO_ADD_INS (cfg->cbb, f2i);
5657 MONO_INST_NEW (cfg, ins, opcode);
5658 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5659 ins->inst_basereg = args [0]->dreg;
5660 ins->inst_offset = 0;
5661 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5662 MONO_ADD_INS (cfg->cbb, ins);
5664 switch (fsig->params [0]->type) {
5666 ins->type = STACK_I4;
5669 ins->type = STACK_I8;
5672 #if SIZEOF_REGISTER == 8
5673 ins->type = STACK_I8;
5675 ins->type = STACK_I4;
5680 ins->type = STACK_R8;
5683 g_assert (mini_type_is_reference (fsig->params [0]));
5684 ins->type = STACK_OBJ;
5689 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5690 i2f->dreg = mono_alloc_freg (cfg);
5691 i2f->sreg1 = ins->dreg;
5692 i2f->type = STACK_R8;
5693 if (i2f_opcode == OP_MOVE_I4_TO_F)
5694 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5695 MONO_ADD_INS (cfg->cbb, i2f);
5700 if (cfg->gen_write_barriers && is_ref)
5701 emit_write_barrier (cfg, args [0], args [1]);
5703 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5704 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5705 guint32 opcode, f2i_opcode, i2f_opcode;
5706 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5707 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5709 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5710 fsig->params [1]->type == MONO_TYPE_R4) {
5711 opcode = OP_ATOMIC_CAS_I4;
5712 f2i_opcode = OP_MOVE_F_TO_I4;
5713 i2f_opcode = OP_MOVE_I4_TO_F;
5714 cfg->has_atomic_cas_i4 = TRUE;
5716 #if SIZEOF_REGISTER == 8
5718 fsig->params [1]->type == MONO_TYPE_I8 ||
5719 fsig->params [1]->type == MONO_TYPE_R8 ||
5720 fsig->params [1]->type == MONO_TYPE_I) {
5721 opcode = OP_ATOMIC_CAS_I8;
5722 f2i_opcode = OP_MOVE_F_TO_I8;
5723 i2f_opcode = OP_MOVE_I8_TO_F;
5726 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5727 opcode = OP_ATOMIC_CAS_I4;
5728 cfg->has_atomic_cas_i4 = TRUE;
5734 if (!mono_arch_opcode_supported (opcode))
5738 /* TODO: Decompose these opcodes instead of bailing here. */
5739 if (COMPILE_SOFT_FLOAT (cfg))
5742 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5743 f2i_new->dreg = mono_alloc_ireg (cfg);
5744 f2i_new->sreg1 = args [1]->dreg;
5745 if (f2i_opcode == OP_MOVE_F_TO_I4)
5746 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5747 MONO_ADD_INS (cfg->cbb, f2i_new);
5749 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5750 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5751 f2i_cmp->sreg1 = args [2]->dreg;
5752 if (f2i_opcode == OP_MOVE_F_TO_I4)
5753 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5754 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5757 MONO_INST_NEW (cfg, ins, opcode);
5758 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5759 ins->sreg1 = args [0]->dreg;
5760 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5761 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5762 MONO_ADD_INS (cfg->cbb, ins);
5764 switch (fsig->params [1]->type) {
5766 ins->type = STACK_I4;
5769 ins->type = STACK_I8;
5772 #if SIZEOF_REGISTER == 8
5773 ins->type = STACK_I8;
5775 ins->type = STACK_I4;
5779 ins->type = cfg->r4_stack_type;
5782 ins->type = STACK_R8;
5785 g_assert (mini_type_is_reference (fsig->params [1]));
5786 ins->type = STACK_OBJ;
5791 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5792 i2f->dreg = mono_alloc_freg (cfg);
5793 i2f->sreg1 = ins->dreg;
5794 i2f->type = STACK_R8;
5795 if (i2f_opcode == OP_MOVE_I4_TO_F)
5796 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5797 MONO_ADD_INS (cfg->cbb, i2f);
5802 if (cfg->gen_write_barriers && is_ref)
5803 emit_write_barrier (cfg, args [0], args [1]);
5805 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5806 fsig->params [1]->type == MONO_TYPE_I4) {
5807 MonoInst *cmp, *ceq;
5809 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5812 /* int32 r = CAS (location, value, comparand); */
5813 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5814 ins->dreg = alloc_ireg (cfg);
5815 ins->sreg1 = args [0]->dreg;
5816 ins->sreg2 = args [1]->dreg;
5817 ins->sreg3 = args [2]->dreg;
5818 ins->type = STACK_I4;
5819 MONO_ADD_INS (cfg->cbb, ins);
5821 /* bool result = r == comparand; */
5822 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5823 cmp->sreg1 = ins->dreg;
5824 cmp->sreg2 = args [2]->dreg;
5825 cmp->type = STACK_I4;
5826 MONO_ADD_INS (cfg->cbb, cmp);
5828 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5829 ceq->dreg = alloc_ireg (cfg);
5830 ceq->type = STACK_I4;
5831 MONO_ADD_INS (cfg->cbb, ceq);
5833 /* *success = result; */
5834 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5836 cfg->has_atomic_cas_i4 = TRUE;
5838 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5839 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5843 } else if (cmethod->klass->image == mono_defaults.corlib &&
5844 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5845 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5848 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5850 MonoType *t = fsig->params [0];
5852 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5854 g_assert (t->byref);
5855 /* t is a byref type, so the reference check is more complicated */
5856 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5857 if (t->type == MONO_TYPE_I1)
5858 opcode = OP_ATOMIC_LOAD_I1;
5859 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5860 opcode = OP_ATOMIC_LOAD_U1;
5861 else if (t->type == MONO_TYPE_I2)
5862 opcode = OP_ATOMIC_LOAD_I2;
5863 else if (t->type == MONO_TYPE_U2)
5864 opcode = OP_ATOMIC_LOAD_U2;
5865 else if (t->type == MONO_TYPE_I4)
5866 opcode = OP_ATOMIC_LOAD_I4;
5867 else if (t->type == MONO_TYPE_U4)
5868 opcode = OP_ATOMIC_LOAD_U4;
5869 else if (t->type == MONO_TYPE_R4)
5870 opcode = OP_ATOMIC_LOAD_R4;
5871 else if (t->type == MONO_TYPE_R8)
5872 opcode = OP_ATOMIC_LOAD_R8;
5873 #if SIZEOF_REGISTER == 8
5874 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5875 opcode = OP_ATOMIC_LOAD_I8;
5876 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5877 opcode = OP_ATOMIC_LOAD_U8;
5879 else if (t->type == MONO_TYPE_I)
5880 opcode = OP_ATOMIC_LOAD_I4;
5881 else if (is_ref || t->type == MONO_TYPE_U)
5882 opcode = OP_ATOMIC_LOAD_U4;
5886 if (!mono_arch_opcode_supported (opcode))
5889 MONO_INST_NEW (cfg, ins, opcode);
5890 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5891 ins->sreg1 = args [0]->dreg;
5892 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5893 MONO_ADD_INS (cfg->cbb, ins);
5896 case MONO_TYPE_BOOLEAN:
5903 ins->type = STACK_I4;
5907 ins->type = STACK_I8;
5911 #if SIZEOF_REGISTER == 8
5912 ins->type = STACK_I8;
5914 ins->type = STACK_I4;
5918 ins->type = cfg->r4_stack_type;
5921 ins->type = STACK_R8;
5925 ins->type = STACK_OBJ;
5931 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5933 MonoType *t = fsig->params [0];
5936 g_assert (t->byref);
5937 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5938 if (t->type == MONO_TYPE_I1)
5939 opcode = OP_ATOMIC_STORE_I1;
5940 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5941 opcode = OP_ATOMIC_STORE_U1;
5942 else if (t->type == MONO_TYPE_I2)
5943 opcode = OP_ATOMIC_STORE_I2;
5944 else if (t->type == MONO_TYPE_U2)
5945 opcode = OP_ATOMIC_STORE_U2;
5946 else if (t->type == MONO_TYPE_I4)
5947 opcode = OP_ATOMIC_STORE_I4;
5948 else if (t->type == MONO_TYPE_U4)
5949 opcode = OP_ATOMIC_STORE_U4;
5950 else if (t->type == MONO_TYPE_R4)
5951 opcode = OP_ATOMIC_STORE_R4;
5952 else if (t->type == MONO_TYPE_R8)
5953 opcode = OP_ATOMIC_STORE_R8;
5954 #if SIZEOF_REGISTER == 8
5955 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5956 opcode = OP_ATOMIC_STORE_I8;
5957 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5958 opcode = OP_ATOMIC_STORE_U8;
5960 else if (t->type == MONO_TYPE_I)
5961 opcode = OP_ATOMIC_STORE_I4;
5962 else if (is_ref || t->type == MONO_TYPE_U)
5963 opcode = OP_ATOMIC_STORE_U4;
5967 if (!mono_arch_opcode_supported (opcode))
5970 MONO_INST_NEW (cfg, ins, opcode);
5971 ins->dreg = args [0]->dreg;
5972 ins->sreg1 = args [1]->dreg;
5973 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5974 MONO_ADD_INS (cfg->cbb, ins);
5976 if (cfg->gen_write_barriers && is_ref)
5977 emit_write_barrier (cfg, args [0], args [1]);
5983 } else if (cmethod->klass->image == mono_defaults.corlib &&
5984 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5985 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5986 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5987 if (should_insert_brekpoint (cfg->method)) {
5988 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5990 MONO_INST_NEW (cfg, ins, OP_NOP);
5991 MONO_ADD_INS (cfg->cbb, ins);
5995 } else if (cmethod->klass->image == mono_defaults.corlib &&
5996 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5997 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5998 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6000 EMIT_NEW_ICONST (cfg, ins, 1);
6002 EMIT_NEW_ICONST (cfg, ins, 0);
6005 } else if (cmethod->klass->image == mono_defaults.corlib &&
6006 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6007 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6008 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6009 /* No stack walks are currently available, so implement this as an intrinsic */
6010 MonoInst *assembly_ins;
6012 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6013 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6016 } else if (cmethod->klass->image == mono_defaults.corlib &&
6017 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6018 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6019 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6020 /* No stack walks are currently available, so implement this as an intrinsic */
6021 MonoInst *method_ins;
6022 MonoMethod *declaring = cfg->method;
6024 /* This returns the declaring generic method */
6025 if (declaring->is_inflated)
6026 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6027 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6028 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6029 cfg->no_inline = TRUE;
6030 if (cfg->method != cfg->current_method)
6031 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6034 } else if (cmethod->klass == mono_defaults.math_class) {
6036 * There is general branchless code for Min/Max, but it does not work for
6038 * http://everything2.com/?node_id=1051618
6040 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6041 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6042 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6043 ins->dreg = alloc_preg (cfg);
6044 ins->type = STACK_I4;
6045 MONO_ADD_INS (cfg->cbb, ins);
6047 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6048 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6049 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6050 !strcmp (cmethod->klass->name, "Selector")) ||
6051 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6052 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6053 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6054 !strcmp (cmethod->klass->name, "Selector"))
6056 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6057 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6058 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6061 MonoJumpInfoToken *ji;
6064 if (args [0]->opcode == OP_GOT_ENTRY) {
6065 pi = (MonoInst *)args [0]->inst_p1;
6066 g_assert (pi->opcode == OP_PATCH_INFO);
6067 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6068 ji = (MonoJumpInfoToken *)pi->inst_p0;
6070 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6071 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6074 NULLIFY_INS (args [0]);
6076 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6077 return_val_if_nok (&cfg->error, NULL);
6079 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6080 ins->dreg = mono_alloc_ireg (cfg);
6083 MONO_ADD_INS (cfg->cbb, ins);
6088 #ifdef MONO_ARCH_SIMD_INTRINSICS
6089 if (cfg->opt & MONO_OPT_SIMD) {
6090 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6096 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6100 if (COMPILE_LLVM (cfg)) {
6101 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6106 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6110 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Hook to redirect a call to METHOD to a different, usually faster,
 * implementation.  Currently handles String.InternalAllocateStr, which is
 * redirected to the GC's managed allocator.  Returns the replacement call
 * instruction when a redirection applies.
 */
6113 inline static MonoInst*
6114 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6115 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6117 if (method->klass == mono_defaults.string_class) {
6118 /* managed string allocation support */
/* Only valid when allocations are not being profiled and we are not
 * compiling domain-shared code (the allocator is per-domain). */
6119 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6120 MonoInst *iargs [2];
6121 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6122 MonoMethod *managed_alloc = NULL;
6124 g_assert (vtable); /* Should not fail since it is System.String */
6125 #ifndef MONO_CROSS_COMPILE
6126 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6130 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6131 iargs [1] = args [0];
6132 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into freshly created local variables,
 * repointing cfg->args at them, so the inlined callee's ARGLOAD/ARGSTORE
 * opcodes resolve to the caller-side copies.
 */
6139 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6141 MonoInst *store, *temp;
6144 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The implicit 'this' argument takes its type from the stack entry;
 * explicit arguments take theirs from the signature. */
6145 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6148 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6149 * would be different than the MonoInst's used to represent arguments, and
6150 * the ldelema implementation can't deal with that.
6151 * Solution: When ldelema is used on an inline argument, create a var for
6152 * it, emit ldelema on that var, and emit the saving code below in
6153 * inline_method () if needed.
6155 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6156 cfg->args [i] = temp;
6157 /* This uses cfg->args [i] which is set by the preceding line */
6158 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6159 store->cil_code = sp [0]->cil_code;
6164 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6165 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6167 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: permit inlining of CALLED_METHOD only when its full name
 * starts with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  Returns TRUE when inlining is allowed.
 */
6169 check_inline_called_method_name_limit (MonoMethod *called_method)
/* The env var is read once and cached across calls. */
6172 static const char *limit = NULL;
6174 if (limit == NULL) {
6175 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6177 if (limit_string != NULL)
6178 limit = limit_string;
/* An empty limit means no restriction (fallback path for an unset var is
 * outside the visible lines — presumably sets limit to "" ). */
6183 if (limit [0] != '\0') {
6184 char *called_method_name = mono_method_full_name (called_method, TRUE);
6186 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6187 g_free (called_method_name);
6189 //return (strncmp_result <= 0);
/* Allow only methods whose full name begins with the configured prefix. */
6190 return (strncmp_result == 0);
6197 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the caller:
 * permit inlining into CALLER_METHOD only when its full name starts with the
 * prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 * Returns TRUE when inlining is allowed.
 */
6199 check_inline_caller_method_name_limit (MonoMethod *caller_method)
/* The env var is read once and cached across calls. */
6202 static const char *limit = NULL;
6204 if (limit == NULL) {
6205 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6206 if (limit_string != NULL) {
6207 limit = limit_string;
/* An empty limit means no restriction. */
6213 if (limit [0] != '\0') {
6214 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6216 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6217 g_free (caller_method_name);
6219 //return (strncmp_result <= 0);
/* Allow only callers whose full name begins with the configured prefix. */
6220 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG to the zero value appropriate for RTYPE:
 * NULL for pointers and references, integer/long zero for integral types,
 * 0.0 for floating point, and VZERO for value types.
 */
6228 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* The FP zero constants are referenced by address from the emitted code,
 * so they need static storage duration. */
6230 static double r8_0 = 0.0;
6231 static float r4_0 = 0.0;
6235 rtype = mini_get_underlying_type (rtype);
6239 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6240 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6241 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6242 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6243 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay single precision, so emit an R4 constant
 * instead of widening to R8. */
6244 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6245 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6246 ins->type = STACK_R4;
6247 ins->inst_p0 = (void*)&r4_0;
6249 MONO_ADD_INS (cfg->cbb, ins);
6250 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6251 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6252 ins->type = STACK_R8;
6253 ins->inst_p0 = (void*)&r8_0;
6255 MONO_ADD_INS (cfg->cbb, ins);
/* Value types (including generic instances over valuetypes) get VZERO. */
6256 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6257 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6258 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6259 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6260 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is a reference type: initialize to null. */
6262 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder initializations
 * instead of real stores.  These keep the IR/SSA form valid without
 * generating machine code; mirrors emit_init_rvar ()'s type dispatch.
 */
6267 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6271 rtype = mini_get_underlying_type (rtype);
6275 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6276 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6277 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6278 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6279 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6280 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6281 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6282 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6283 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6284 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6285 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6286 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6287 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6288 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: fall back to a real initialization. */
6290 emit_init_rvar (cfg, dreg, rtype);
6294 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6296 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6298 MonoInst *var = cfg->locals [local];
/* Under soft float, materialize the zero value into a fresh dreg and then
 * store it to the local, instead of writing the local's dreg directly. */
6299 if (COMPILE_SOFT_FLOAT (cfg)) {
6301 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6302 emit_init_rvar (cfg, reg, type);
6303 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
/* Real initialization when INIT is requested... */
6306 emit_init_rvar (cfg, var->dreg, type);
/* ...otherwise just a placeholder that keeps the IR well-formed. */
6308 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Exported wrapper around the static inline_method (); same contract. */
6313 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6315 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6321 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
6324 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6325 guchar *ip, guint real_offset, gboolean inline_always)
6328 MonoInst *ins, *rvar = NULL;
6329 MonoMethodHeader *cheader;
6330 MonoBasicBlock *ebblock, *sbblock;
/* Saved compilation state: mono_method_to_ir () is reentered for the
 * callee, so every per-method field of CFG it touches is saved here and
 * restored afterwards. */
6332 MonoMethod *prev_inlined_method;
6333 MonoInst **prev_locals, **prev_args;
6334 MonoType **prev_arg_types;
6335 guint prev_real_offset;
6336 GHashTable *prev_cbb_hash;
6337 MonoBasicBlock **prev_cil_offset_to_bb;
6338 MonoBasicBlock *prev_cbb;
6339 const unsigned char *prev_ip;
6340 unsigned char *prev_cil_start;
6341 guint32 prev_cil_offset_to_bb_len;
6342 MonoMethod *prev_current_method;
6343 MonoGenericContext *prev_generic_context;
6344 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6346 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debugging filters restricting which methods may be inlined. */
6348 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6349 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6352 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6353 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6358 fsig = mono_method_signature (cmethod);
6360 if (cfg->verbose_level > 2)
6361 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once. */
6363 if (!cmethod->inline_info) {
6364 cfg->stat_inlineable_methods++;
6365 cmethod->inline_info = 1;
6368 /* allocate local variables */
6369 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failure: fatal when the inline is mandatory, otherwise
 * just give up on inlining. */
6371 if (inline_always) {
6372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6373 mono_error_move (&cfg->error, &error);
6375 mono_error_cleanup (&error);
6380 /*Must verify before creating locals as it can cause the JIT to assert.*/
6381 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6382 mono_metadata_free_mh (cheader);
6386 /* allocate space to store the return value */
6387 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6388 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee's locals. */
6391 prev_locals = cfg->locals;
6392 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6393 for (i = 0; i < cheader->num_locals; ++i)
6394 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6396 /* allocate start and end blocks */
6397 /* This is needed so if the inline is aborted, we can clean up */
6398 NEW_BBLOCK (cfg, sbblock);
6399 sbblock->real_offset = real_offset;
6401 NEW_BBLOCK (cfg, ebblock);
6402 ebblock->block_num = cfg->num_bblocks++;
6403 ebblock->real_offset = real_offset;
6405 prev_args = cfg->args;
6406 prev_arg_types = cfg->arg_types;
6407 prev_inlined_method = cfg->inlined_method;
6408 cfg->inlined_method = cmethod;
6409 cfg->ret_var_set = FALSE;
6410 cfg->inline_depth ++;
6411 prev_real_offset = cfg->real_offset;
6412 prev_cbb_hash = cfg->cbb_hash;
6413 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6414 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6415 prev_cil_start = cfg->cil_start;
6417 prev_cbb = cfg->cbb;
6418 prev_current_method = cfg->current_method;
6419 prev_generic_context = cfg->generic_context;
6420 prev_ret_var_set = cfg->ret_var_set;
6421 prev_disable_inline = cfg->disable_inline;
/* A devirtualized callvirt still needs an explicit null check on 'this'. */
6423 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; returns its inline cost or < 0. */
6426 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6428 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state saved above. */
6430 cfg->inlined_method = prev_inlined_method;
6431 cfg->real_offset = prev_real_offset;
6432 cfg->cbb_hash = prev_cbb_hash;
6433 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6434 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6435 cfg->cil_start = prev_cil_start;
6437 cfg->locals = prev_locals;
6438 cfg->args = prev_args;
6439 cfg->arg_types = prev_arg_types;
6440 cfg->current_method = prev_current_method;
6441 cfg->generic_context = prev_generic_context;
6442 cfg->ret_var_set = prev_ret_var_set;
6443 cfg->disable_inline = prev_disable_inline;
6444 cfg->inline_depth --;
/* Commit the inline when the callee was cheap enough (cost < 60), the
 * inline is mandatory, or the method is marked AggressiveInlining. */
6446 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6447 if (cfg->verbose_level > 2)
6448 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6450 cfg->stat_inlined_methods++;
6452 /* always add some code to avoid block split failures */
6453 MONO_INST_NEW (cfg, ins, OP_NOP);
6454 MONO_ADD_INS (prev_cbb, ins);
6456 prev_cbb->next_bb = sbblock;
6457 link_bblock (cfg, prev_cbb, sbblock);
6460 * Get rid of the begin and end bblocks if possible to aid local
6463 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6465 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6466 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6468 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6469 MonoBasicBlock *prev = ebblock->in_bb [0];
6471 if (prev->next_bb == ebblock) {
6472 mono_merge_basic_blocks (cfg, prev, ebblock);
6474 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6475 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6476 cfg->cbb = prev_cbb;
6479 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6484 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Predecessors ending in OP_NOT_REACHED never set rvar: give it a dummy
 * value there so every path into ebblock defines it. */
6490 for (i = 0; i < ebblock->in_count; ++i) {
6491 bb = ebblock->in_bb [i];
6493 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6496 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6506 * If the inlined method contains only a throw, then the ret var is not
6507 * set, so set it to a dummy value.
6510 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value for the caller's evaluation stack. */
6512 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6515 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted (too costly or failed): undo and report cost 0. */
6518 if (cfg->verbose_level > 2)
6519 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6520 cfg->exception_type = MONO_EXCEPTION_NONE;
6522 /* This gets rid of the newly added bblocks */
6523 cfg->cbb = prev_cbb;
6525 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6530 * Some of these comments may well be out-of-date.
6531 * Design decisions: we do a single pass over the IL code (and we do bblock
6532 * splitting/merging in the few cases when it's required: a back jump to an IL
6533 * address that was not already seen as bblock starting point).
6534 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6535 * Complex operations are decomposed into simpler ones right away. We need to let the
6536 * arch-specific code peek and poke inside this process somehow (except when the
6537 * optimizations can take advantage of the full semantic info of coarse opcodes).
6538 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6539 * MonoInst->opcode initially is the IL opcode or some simplification of that
6540 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6541 * opcode with value bigger than OP_LAST.
6542 * At this point the IR can be handed over to an interpreter, a dumb code generator
6543 * or to the optimizing code generator that will translate it to SSA form.
6545 * Profiling directed optimizations.
6546 * We may compile by default with few or no optimizations and instrument the code
6547 * or the user may indicate what methods to optimize the most either in a config file
6548 * or through repeated runs where the compiler applies offline the optimizations to
6549 * each method and then decides if it was worth it.
6552 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6553 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6554 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6555 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6556 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6557 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6558 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6559 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6561 /* offset from br.s -> br like opcodes */
6562 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether IL address IP belongs to basic block BB, i.e. no other
 * basic block starts at that offset.
 */
6565 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6567 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
/* NULL means no bblock starts here, so IP is still inside BB. */
6569 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Linear pre-pass over the IL in [START, END): decode each opcode, and
 * create basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch.  Also marks the block
 * containing a CEE_THROW as out-of-line so it is laid out cold.
 */
6573 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6575 unsigned char *ip = start;
6576 unsigned char *target;
6579 MonoBasicBlock *bblock;
6580 const MonoOpcode *opcode;
6583 cli_addr = ip - start;
6584 i = mono_opcode_value ((const guint8 **)&ip, end);
6587 opcode = &mono_opcodes [i];
/* Advance IP past the operand; only branch operands create bblocks. */
6588 switch (opcode->argument) {
6589 case MonoInlineNone:
6592 case MonoInlineString:
6593 case MonoInlineType:
6594 case MonoInlineField:
6595 case MonoInlineMethod:
6598 case MonoShortInlineR:
6605 case MonoShortInlineVar:
6606 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement from the next instruction. */
6609 case MonoShortInlineBrTarget:
6610 target = start + cli_addr + 2 + (signed char)ip [1];
6611 GET_BBLOCK (cfg, bblock, target);
/* Fall-through successor also starts a bblock. */
6614 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement from the next instruction. */
6616 case MonoInlineBrTarget:
6617 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6618 GET_BBLOCK (cfg, bblock, target);
6621 GET_BBLOCK (cfg, bblock, ip);
/* Switch: N 4-byte displacements relative to the end of the table. */
6623 case MonoInlineSwitch: {
6624 guint32 n = read32 (ip + 1);
6627 cli_addr += 5 + 4 * n;
6628 target = start + cli_addr;
6629 GET_BBLOCK (cfg, bblock, target);
6631 for (j = 0; j < n; ++j) {
6632 target = start + cli_addr + (gint32)read32 (ip);
6633 GET_BBLOCK (cfg, bblock, target);
6643 g_assert_not_reached ();
6646 if (i == CEE_THROW) {
6647 unsigned char *bb_start = ip - 1;
6649 /* Find the start of the bblock containing the throw */
6651 while ((bb_start >= start) && !bblock) {
6652 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: keep them out of the main code path. */
6656 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing the
 * result to be an open generic method.  Wrapper methods store their
 * referenced methods as wrapper data instead of metadata tokens.
 * On failure returns NULL and sets ERROR.
 */
6666 static inline MonoMethod *
6667 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6671 mono_error_init (error);
6673 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6674 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
/* Wrapper data can still be generic: inflate with the current context. */
6676 method = mono_class_inflate_generic_method_checked (method, context, error);
6679 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally reject open
 * constructed types when not compiling gshared code (CFG may be NULL).
 * Errors are recorded in cfg->error when CFG is given.
 */
6685 static inline MonoMethod *
6686 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6689 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
/* Open types are only meaningful under generic sharing. */
6691 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6692 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6696 if (!method && !cfg)
6697 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoClass, inflating with
 * CONTEXT.  Wrapper methods store their referenced classes as wrapper data.
 * The class is initialized before returning.
 */
6702 static inline MonoClass*
6703 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6708 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6709 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6711 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6712 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6715 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6716 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6719 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoMethodSignature,
 * inflating it with CONTEXT.  Wrapper methods store their signatures as
 * wrapper data.  Returns NULL and sets ERROR on failure.
 */
6723 static inline MonoMethodSignature*
6724 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6726 MonoMethodSignature *fsig;
6728 mono_error_init (error);
6729 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6730 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6732 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6733 return_val_if_nok (error, NULL);
6736 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching) the managed
 * SecurityManager.ThrowException(exception) method used to raise
 * CoreCLR security exceptions.
 */
6742 throw_exception (void)
6744 static MonoMethod *method = NULL;
6747 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6748 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () raising EX at runtime.
 */
6755 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6757 MonoMethod *thrower = throw_exception ();
6760 EMIT_NEW_PCONST (cfg, args [0], ex);
6761 mono_emit_method_call (cfg, thrower, args, NULL);
6765 * Return the original method if a wrapper is specified. We can only access
6766 * the custom attributes from the original method.
6769 get_original_method (MonoMethod *method)
/* Non-wrapper methods are already "original". */
6771 if (method->wrapper_type == MONO_WRAPPER_NONE)
6774 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6775 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6778 /* in other cases we need to find the original method */
6779 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR that
 * throws the corresponding security exception at runtime.
 */
6783 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6785 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6786 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6788 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit IR that
 * throws the corresponding security exception at runtime.
 */
6792 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6794 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6795 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6797 emit_throw_exception (cfg, ex);
6801 * Check that the IL instructions at ip are the array initialization
6802 * sequence and return the pointer to the data and the size.
/*
 * Recognized pattern (see the IL sketch below): a dup/ldtoken/call to
 * RuntimeHelpers::InitializeArray immediately after a newarr.  On a match,
 * *out_size receives the byte size and *out_field_token the field token;
 * the return value is the raw RVA data pointer (or the RVA itself for AOT).
 */
6805 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6808 * newarr[System.Int32]
6810 * ldtoken field valuetype ...
6811 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token table byte (a Field token). */
6813 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6815 guint32 token = read32 (ip + 7);
6816 guint32 field_token = read32 (ip + 2);
6817 guint32 field_index = field_token & 0xffffff;
6819 const char *data_ptr;
6821 MonoMethod *cmethod;
6822 MonoClass *dummy_class;
6823 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6827 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6831 *out_field_token = field_token;
6833 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must be exactly corlib's RuntimeHelpers::InitializeArray. */
6836 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6838 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6839 case MONO_TYPE_BOOLEAN:
6843 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6844 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6845 case MONO_TYPE_CHAR:
/* Reject if the declared data blob is smaller than the array needs. */
6862 if (size > mono_type_size (field->type, &dummy_align))
6865 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6866 if (!image_is_dynamic (method->klass->image)) {
6867 field_index = read32 (ip + 2) & 0xffffff;
6868 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6869 data_ptr = mono_image_rva_map (method->klass->image, rva);
6870 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6871 /* for aot code we do the lookup on load */
6872 if (aot && data_ptr)
6873 return (const char *)GUINT_TO_POINTER (rva);
6875 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: fetch the data directly from the field. */
6877 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the invalid IL at IP,
 * building a human-readable message from the method name and either the
 * header parse error, "method body is empty.", or a disassembly of the
 * offending instruction.
 */
6885 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6888 char *method_fname = mono_method_full_name (method, TRUE);
6890 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6893 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6894 mono_error_cleanup (&error);
6895 } else if (header->code_size == 0)
6896 method_code = g_strdup ("method body is empty.");
6898 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6899 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6900 g_free (method_fname);
6901 g_free (method_code);
/* The header is mempool-tracked so it is released with the compile. */
6902 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local variable N.  When the store would be a
 * plain register move of a constant that is also the last emitted ins, the
 * move is elided by retargeting the constant's destination register.
 */
6906 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6909 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6910 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6911 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6912 /* Optimize reg-reg moves away */
6914 * Can't optimize other opcodes, since sp[0] might point to
6915 * the last ins of a decomposed opcode.
6917 sp [0]->dreg = (cfg)->locals [n]->dreg;
6919 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6924 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Returns the new ip past the consumed sequence on success; here the
 * ldloca+initobj pair is rewritten as a direct local initialization.
 */
6927 static inline unsigned char *
6928 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6938 local = read16 (ip + 2);
/* Recognize ldloca immediately followed by initobj in the same bblock. */
6942 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6943 /* From the INITOBJ case */
6944 token = read32 (ip + 2);
6945 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6946 CHECK_TYPELOAD (klass);
6947 type = mini_get_underlying_type (&klass->byval_arg);
/* Zero-initialize the local directly instead of through its address. */
6948 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call for llvm-only mode, where vtable and IMT
 * slots hold function descriptors (addr + arg pairs) instead of raw code
 * addresses/trampolines.  Picks one of four strategies depending on the
 * call shape: plain virtual, simple interface, generic-virtual/variant
 * interface, or the unoptimized gsharedvt fallback.
 */
6956 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6958 MonoInst *icall_args [16];
6959 MonoInst *call_target, *ins, *vtable_ins;
6960 int arg_reg, this_reg, vtable_reg;
6961 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6962 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6963 gboolean variant_iface = FALSE;
6968 * In llvm-only mode, vtables contain function descriptors instead of
6969 * method addresses/trampolines.
6971 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use ordinary vtable indexes. */
6974 slot = mono_method_get_imt_slot (cmethod);
6976 slot = mono_method_get_vtable_index (cmethod);
6978 this_reg = sp [0]->dreg;
6980 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6981 variant_iface = TRUE;
/* Case 1: a plain virtual call through the vtable. */
6983 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6985 * The simplest case, a normal virtual call.
6987 int slot_reg = alloc_preg (cfg);
6988 int addr_reg = alloc_preg (cfg);
6989 int arg_reg = alloc_preg (cfg);
6990 MonoBasicBlock *non_null_bb;
6992 vtable_reg = alloc_preg (cfg);
6993 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6994 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6996 /* Load the vtable slot, which contains a function descriptor. */
6997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6999 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL) — branch over the icall. */
7001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7002 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7003 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7006 // FIXME: Make the wrapper use the preserveall cconv
7007 // FIXME: Use one icall per slot for small slot numbers ?
7008 icall_args [0] = vtable_ins;
7009 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7010 /* Make the icall return the vtable slot value to save some code space */
7011 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7012 ins->dreg = slot_reg;
7013 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7016 MONO_START_BB (cfg, non_null_bb);
7017 /* Load the address + arg from the vtable slot */
7018 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7021 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a simple (non-variant, non-generic) interface call via IMT. */
7024 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7026 * A simple interface call
7028 * We make a call through an imt slot to obtain the function descriptor we need to call.
7029 * The imt slot contains a function descriptor for a runtime function + arg.
7031 int slot_reg = alloc_preg (cfg);
7032 int addr_reg = alloc_preg (cfg);
7033 int arg_reg = alloc_preg (cfg);
7034 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7036 vtable_reg = alloc_preg (cfg);
7037 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live below the vtable start, hence the negative offset. */
7038 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7041 * The slot is already initialized when the vtable is created so there is no need
7045 /* Load the imt slot, which contains a function descriptor. */
7046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7048 /* Load the address + arg of the imt thunk from the imt slot */
7049 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7050 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7052 * IMT thunks in llvm-only mode are C functions which take an info argument
7053 * plus the imt method and return the ftndesc to call.
7055 icall_args [0] = thunk_arg_ins;
7056 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7057 cmethod, MONO_RGCTX_INFO_METHOD);
7058 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7060 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual methods and variant interfaces. */
7063 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7065 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7066 * dynamically extended as more instantiations are discovered.
7067 * This handles generic virtual methods both on classes and interfaces.
7069 int slot_reg = alloc_preg (cfg);
7070 int addr_reg = alloc_preg (cfg);
7071 int arg_reg = alloc_preg (cfg);
7072 int ftndesc_reg = alloc_preg (cfg);
7073 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7074 MonoBasicBlock *slowpath_bb, *end_bb;
7076 NEW_BBLOCK (cfg, slowpath_bb);
7077 NEW_BBLOCK (cfg, end_bb);
7079 vtable_reg = alloc_preg (cfg);
7080 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7082 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7084 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7086 /* Load the slot, which contains a function descriptor. */
7087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7089 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7090 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7095 /* Same as with iface calls */
7096 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7097 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7098 icall_args [0] = thunk_arg_ins;
7099 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7100 cmethod, MONO_RGCTX_INFO_METHOD);
7101 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7102 ftndesc_ins->dreg = ftndesc_reg;
7104 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7105 * they don't know about yet. Fall back to the slowpath in that case.
7107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7108 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7113 MONO_START_BB (cfg, slowpath_bb);
7114 icall_args [0] = vtable_ins;
7115 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7116 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7117 cmethod, MONO_RGCTX_INFO_METHOD);
7119 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7121 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7122 ftndesc_ins->dreg = ftndesc_reg;
7123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7126 MONO_START_BB (cfg, end_bb);
7127 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7131 * Non-optimized cases
7133 icall_args [0] = sp [0];
7134 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7136 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7137 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter the resolver fills with the extra (gsharedvt) argument. */
7139 arg_reg = alloc_preg (cfg);
7140 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7141 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7143 g_assert (is_gsharedvt);
7145 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7147 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7150 * Pass the extra argument even if the callee doesn't receive it, most
7151 * calling conventions allow this.
7153 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return TRUE if KLASS derives from (or is) System.Exception, by walking
 * the parent chain until the exception class is found or the chain ends.
 */
7157 is_exception_class (MonoClass *klass)
7160 if (klass == mono_defaults.exception_class)
7162 klass = klass->parent;
7168 * is_jit_optimizer_disabled:
7170 * Determine whether M's assembly has a DebuggableAttribute with the
7171 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly; memory barriers order the value store
 * before the 'inited' flag so concurrent readers see a consistent pair.
 */
7174 is_jit_optimizer_disabled (MonoMethod *m)
7177 MonoAssembly *ass = m->klass->image->assembly;
7178 MonoCustomAttrInfo* attrs;
7181 gboolean val = FALSE;
/* Fast path: already computed for this assembly. */
7184 if (ass->jit_optimizer_disabled_inited)
7185 return ass->jit_optimizer_disabled;
7187 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class not present: optimizer stays enabled. */
7191 ass->jit_optimizer_disabled = FALSE;
7192 mono_memory_barrier ();
7193 ass->jit_optimizer_disabled_inited = TRUE;
7197 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7198 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7200 for (i = 0; i < attrs->num_attrs; ++i) {
7201 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7203 MonoMethodSignature *sig;
7205 if (!attr->ctor || attr->ctor->klass != klass)
7207 /* Decode the attribute. See reflection.c */
7208 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335). */
7209 g_assert (read16 (p) == 0x0001);
7212 // FIXME: Support named parameters
7213 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is understood here. */
7214 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7216 /* Two boolean arguments */
7220 mono_custom_attrs_free (attrs);
7223 ass->jit_optimizer_disabled = val;
7224 mono_memory_barrier ();
7225 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether the call from METHOD to CMETHOD (with signature FSIG,
 * made via CALL_OPCODE) can be compiled as a tail call.  Starts from the
 * arch-specific answer and vetoes any case where the callee could observe
 * a pointer into the current method's (about to vanish) stack frame.
 */
7231 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7233 gboolean supported_tail_call;
7236 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7238 for (i = 0; i < fsig->param_count; ++i) {
7239 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7240 /* These can point to the current method's stack */
7241 supported_tail_call = FALSE;
7243 if (fsig->hasthis && cmethod->klass->valuetype)
7244 /* this might point to the current method's stack */
7245 supported_tail_call = FALSE;
7246 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7247 supported_tail_call = FALSE;
/* An LMF frame must be popped on return, which a tail call skips. */
7248 if (cfg->method->save_lmf)
7249 supported_tail_call = FALSE;
7250 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7251 supported_tail_call = FALSE;
7252 if (call_opcode != CEE_CALL)
7253 supported_tail_call = FALSE;
7255 /* Debugging support */
7257 if (supported_tail_call) {
7258 if (!mono_debug_count ())
7259 supported_tail_call = FALSE;
7263 return supported_tail_call;
7269 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Chooses between inlining the ctor, an intrinsic expansion, a gsharedvt
 * or rgctx-based indirect call, and a plain direct call.  SP holds the
 * (already materialized) ctor arguments; IP is the current IL position and
 * *inline_costs is updated when inlining succeeds.
 */
7272 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7273 MonoInst **sp, guint8 *ip, int *inline_costs)
7275 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors may need an explicit vtable/mrgctx argument. */
7277 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7278 mono_method_is_generic_sharable (cmethod, TRUE)) {
7279 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7280 mono_class_vtable (cfg->domain, cmethod->klass);
7281 CHECK_TYPELOAD (cmethod->klass);
7283 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7284 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7287 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7288 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7290 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7292 CHECK_TYPELOAD (cmethod->klass);
7293 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7298 /* Avoid virtual calls to ctors if possible */
7299 if (mono_class_is_marshalbyref (cmethod->klass))
7300 callvirt_this_arg = sp [0];
/* Try an intrinsic expansion of the ctor first. */
7302 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7303 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7304 CHECK_CFG_EXCEPTION;
7305 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7306 mono_method_check_inlining (cfg, cmethod) &&
7307 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
/* Inline the ctor body; 5 is the size of the NEWOBJ/CALL IL site. */
7310 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7311 cfg->real_offset += 5;
7313 *inline_costs += costs - 5;
7315 INLINE_FAILURE ("inline failure");
7316 // FIXME-VT: Clean this up
7317 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7318 GSHAREDVT_FAILURE(*ip);
7319 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7321 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* gsharedvt: call through an out trampoline fetched from the rgctx. */
7324 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7326 if (cfg->llvm_only) {
7327 // FIXME: Avoid initializing vtable_arg
7328 emit_llvmonly_calli (cfg, fsig, sp, addr);
7330 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7332 } else if (context_used &&
7333 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7334 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7335 MonoInst *cmethod_addr;
7337 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7339 if (cfg->llvm_only) {
7340 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7341 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7342 emit_llvmonly_calli (cfg, fsig, sp, addr);
7344 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7345 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7347 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain direct call to the ctor. */
7350 INLINE_FAILURE ("ctor call");
7351 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7352 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR that stores VAL as the return value of the current method.
 * Valuetype returns go through the vret address (or ret var when inlining);
 * soft-float R4 returns are converted via an icall; everything else uses
 * the arch-specific setret.
 */
7359 emit_setret (MonoCompile *cfg, MonoInst *val)
7361 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* STOBJ return: the value must be copied into caller-provided memory. */
7364 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7367 if (!cfg->vret_addr) {
7368 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7370 EMIT_NEW_RETLOADA (cfg, ret_addr);
7372 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7373 ins->klass = mono_class_from_mono_type (ret_type);
7376 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: convert the R4 value through an icall before setret. */
7377 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7378 MonoInst *iargs [1];
7382 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7383 mono_arch_emit_setret (cfg, cfg->method, conv);
7385 mono_arch_emit_setret (cfg, cfg->method, val);
7388 mono_arch_emit_setret (cfg, cfg->method, val);
7394 * mono_method_to_ir:
7396 * Translate the .net IL into linear IR.
7398 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7399 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7400 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7401 * @inline_args: if not NULL, contains the arguments to the inline call
7402 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7403 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7405 * This method is used to turn ECMA IL into Mono's internal Linear IR
7406 * representation. It is used both for entire methods, as well as
7407 * inlining existing methods. In the former case, the @start_bblock,
7408 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7409 * inline_offset is set to zero.
7411 * Returns: the inline cost, or -1 if there was an error processing this method.
7414 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7415 MonoInst *return_var, MonoInst **inline_args,
7416 guint inline_offset, gboolean is_virtual_call)
7419 MonoInst *ins, **sp, **stack_start;
7420 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7421 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7422 MonoMethod *cmethod, *method_definition;
7423 MonoInst **arg_array;
7424 MonoMethodHeader *header;
7426 guint32 token, ins_flag;
7428 MonoClass *constrained_class = NULL;
7429 unsigned char *ip, *end, *target, *err_pos;
7430 MonoMethodSignature *sig;
7431 MonoGenericContext *generic_context = NULL;
7432 MonoGenericContainer *generic_container = NULL;
7433 MonoType **param_types;
7434 int i, n, start_new_bblock, dreg;
7435 int num_calls = 0, inline_costs = 0;
7436 int breakpoint_id = 0;
7438 GSList *class_inits = NULL;
7439 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7441 gboolean init_locals, seq_points, skip_dead_blocks;
7442 gboolean sym_seq_points = FALSE;
7443 MonoDebugMethodInfo *minfo;
7444 MonoBitSet *seq_point_locs = NULL;
7445 MonoBitSet *seq_point_set_locs = NULL;
7447 cfg->disable_inline = is_jit_optimizer_disabled (method);
7449 /* serialization and xdomain stuff may need access to private fields and methods */
7450 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7451 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7452 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7453 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7454 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7455 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7457 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7458 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7459 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7460 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7461 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7463 image = method->klass->image;
7464 header = mono_method_get_header_checked (method, &cfg->error);
7466 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7467 goto exception_exit;
7469 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7472 generic_container = mono_method_get_generic_container (method);
7473 sig = mono_method_signature (method);
7474 num_args = sig->hasthis + sig->param_count;
7475 ip = (unsigned char*)header->code;
7476 cfg->cil_start = ip;
7477 end = ip + header->code_size;
7478 cfg->stat_cil_code_size += header->code_size;
7480 seq_points = cfg->gen_seq_points && cfg->method == method;
7482 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7483 /* We could hit a seq point before attaching to the JIT (#8338) */
7487 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7488 minfo = mono_debug_lookup_method (method);
7490 MonoSymSeqPoint *sps;
7491 int i, n_il_offsets;
7493 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7494 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7495 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7496 sym_seq_points = TRUE;
7497 for (i = 0; i < n_il_offsets; ++i) {
7498 if (sps [i].il_offset < header->code_size)
7499 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7502 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7503 /* Methods without line number info like auto-generated property accessors */
7504 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7505 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7506 sym_seq_points = TRUE;
7511 * Methods without init_locals set could cause asserts in various passes
7512 * (#497220). To work around this, we emit dummy initialization opcodes
7513 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7514 * on some platforms.
7516 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7517 init_locals = header->init_locals;
7521 method_definition = method;
7522 while (method_definition->is_inflated) {
7523 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7524 method_definition = imethod->declaring;
7527 /* SkipVerification is not allowed if core-clr is enabled */
7528 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7530 dont_verify_stloc = TRUE;
7533 if (sig->is_inflated)
7534 generic_context = mono_method_get_context (method);
7535 else if (generic_container)
7536 generic_context = &generic_container->context;
7537 cfg->generic_context = generic_context;
7540 g_assert (!sig->has_type_parameters);
7542 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7543 g_assert (method->is_inflated);
7544 g_assert (mono_method_get_context (method)->method_inst);
7546 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7547 g_assert (sig->generic_param_count);
7549 if (cfg->method == method) {
7550 cfg->real_offset = 0;
7552 cfg->real_offset = inline_offset;
7555 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7556 cfg->cil_offset_to_bb_len = header->code_size;
7558 cfg->current_method = method;
7560 if (cfg->verbose_level > 2)
7561 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7563 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7565 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7566 for (n = 0; n < sig->param_count; ++n)
7567 param_types [n + sig->hasthis] = sig->params [n];
7568 cfg->arg_types = param_types;
7570 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7571 if (cfg->method == method) {
7573 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7574 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7577 NEW_BBLOCK (cfg, start_bblock);
7578 cfg->bb_entry = start_bblock;
7579 start_bblock->cil_code = NULL;
7580 start_bblock->cil_length = 0;
7583 NEW_BBLOCK (cfg, end_bblock);
7584 cfg->bb_exit = end_bblock;
7585 end_bblock->cil_code = NULL;
7586 end_bblock->cil_length = 0;
7587 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7588 g_assert (cfg->num_bblocks == 2);
7590 arg_array = cfg->args;
7592 if (header->num_clauses) {
7593 cfg->spvars = g_hash_table_new (NULL, NULL);
7594 cfg->exvars = g_hash_table_new (NULL, NULL);
7596 /* handle exception clauses */
7597 for (i = 0; i < header->num_clauses; ++i) {
7598 MonoBasicBlock *try_bb;
7599 MonoExceptionClause *clause = &header->clauses [i];
7600 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7602 try_bb->real_offset = clause->try_offset;
7603 try_bb->try_start = TRUE;
7604 try_bb->region = ((i + 1) << 8) | clause->flags;
7605 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7606 tblock->real_offset = clause->handler_offset;
7607 tblock->flags |= BB_EXCEPTION_HANDLER;
7610 * Linking the try block with the EH block hinders inlining as we won't be able to
7611 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7613 if (COMPILE_LLVM (cfg))
7614 link_bblock (cfg, try_bb, tblock);
7616 if (*(ip + clause->handler_offset) == CEE_POP)
7617 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7619 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7620 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7621 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7622 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7623 MONO_ADD_INS (tblock, ins);
7625 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7626 /* finally clauses already have a seq point */
7627 /* seq points for filter clauses are emitted below */
7628 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7629 MONO_ADD_INS (tblock, ins);
7632 /* todo: is a fault block unsafe to optimize? */
7633 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7634 tblock->flags |= BB_EXCEPTION_UNSAFE;
7637 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7639 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7641 /* catch and filter blocks get the exception object on the stack */
7642 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7643 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7645 /* mostly like handle_stack_args (), but just sets the input args */
7646 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7647 tblock->in_scount = 1;
7648 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7649 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7653 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7654 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7655 if (!cfg->compile_llvm) {
7656 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7657 ins->dreg = tblock->in_stack [0]->dreg;
7658 MONO_ADD_INS (tblock, ins);
7661 MonoInst *dummy_use;
7664 * Add a dummy use for the exvar so its liveness info will be
7667 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7670 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7671 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7672 MONO_ADD_INS (tblock, ins);
7675 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7676 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7677 tblock->flags |= BB_EXCEPTION_HANDLER;
7678 tblock->real_offset = clause->data.filter_offset;
7679 tblock->in_scount = 1;
7680 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7681 /* The filter block shares the exvar with the handler block */
7682 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7683 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7684 MONO_ADD_INS (tblock, ins);
7688 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7689 clause->data.catch_class &&
7691 mono_class_check_context_used (clause->data.catch_class)) {
7693 * In shared generic code with catch
7694 * clauses containing type variables
7695 * the exception handling code has to
7696 * be able to get to the rgctx.
7697 * Therefore we have to make sure that
7698 * the vtable/mrgctx argument (for
7699 * static or generic methods) or the
7700 * "this" argument (for non-static
7701 * methods) are live.
7703 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7704 mini_method_get_context (method)->method_inst ||
7705 method->klass->valuetype) {
7706 mono_get_vtable_var (cfg);
7708 MonoInst *dummy_use;
7710 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7715 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7716 cfg->cbb = start_bblock;
7717 cfg->args = arg_array;
7718 mono_save_args (cfg, sig, inline_args);
7721 /* FIRST CODE BLOCK */
7722 NEW_BBLOCK (cfg, tblock);
7723 tblock->cil_code = ip;
7727 ADD_BBLOCK (cfg, tblock);
7729 if (cfg->method == method) {
7730 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7731 if (breakpoint_id) {
7732 MONO_INST_NEW (cfg, ins, OP_BREAK);
7733 MONO_ADD_INS (cfg->cbb, ins);
7737 /* we use a separate basic block for the initialization code */
7738 NEW_BBLOCK (cfg, init_localsbb);
7739 if (cfg->method == method)
7740 cfg->bb_init = init_localsbb;
7741 init_localsbb->real_offset = cfg->real_offset;
7742 start_bblock->next_bb = init_localsbb;
7743 init_localsbb->next_bb = cfg->cbb;
7744 link_bblock (cfg, start_bblock, init_localsbb);
7745 link_bblock (cfg, init_localsbb, cfg->cbb);
7747 cfg->cbb = init_localsbb;
7749 if (cfg->gsharedvt && cfg->method == method) {
7750 MonoGSharedVtMethodInfo *info;
7751 MonoInst *var, *locals_var;
7754 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7755 info->method = cfg->method;
7756 info->count_entries = 16;
7757 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7758 cfg->gsharedvt_info = info;
7760 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7761 /* prevent it from being register allocated */
7762 //var->flags |= MONO_INST_VOLATILE;
7763 cfg->gsharedvt_info_var = var;
7765 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7766 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7768 /* Allocate locals */
7769 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7770 /* prevent it from being register allocated */
7771 //locals_var->flags |= MONO_INST_VOLATILE;
7772 cfg->gsharedvt_locals_var = locals_var;
7774 dreg = alloc_ireg (cfg);
7775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7777 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7778 ins->dreg = locals_var->dreg;
7780 MONO_ADD_INS (cfg->cbb, ins);
7781 cfg->gsharedvt_locals_var_ins = ins;
7783 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7786 ins->flags |= MONO_INST_INIT;
7790 if (mono_security_core_clr_enabled ()) {
7791 /* check if this is native code, e.g. an icall or a p/invoke */
7792 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7793 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7795 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7796 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7798 /* if this is a native call then it can only be JITted from platform code */
7799 if ((icall || pinvk) && method->klass && method->klass->image) {
7800 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7801 MonoException *ex = icall ? mono_get_exception_security () :
7802 mono_get_exception_method_access ();
7803 emit_throw_exception (cfg, ex);
7810 CHECK_CFG_EXCEPTION;
7812 if (header->code_size == 0)
7815 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7820 if (cfg->method == method)
7821 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7823 for (n = 0; n < header->num_locals; ++n) {
7824 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7829 /* We force the vtable variable here for all shared methods
7830 for the possibility that they might show up in a stack
7831 trace where their exact instantiation is needed. */
7832 if (cfg->gshared && method == cfg->method) {
7833 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7834 mini_method_get_context (method)->method_inst ||
7835 method->klass->valuetype) {
7836 mono_get_vtable_var (cfg);
7838 /* FIXME: Is there a better way to do this?
7839 We need the variable live for the duration
7840 of the whole method. */
7841 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7845 /* add a check for this != NULL to inlined methods */
7846 if (is_virtual_call) {
7849 NEW_ARGLOAD (cfg, arg_ins, 0);
7850 MONO_ADD_INS (cfg->cbb, arg_ins);
7851 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7854 skip_dead_blocks = !dont_verify;
7855 if (skip_dead_blocks) {
7856 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7861 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7862 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7865 start_new_bblock = 0;
7867 if (cfg->method == method)
7868 cfg->real_offset = ip - header->code;
7870 cfg->real_offset = inline_offset;
7875 if (start_new_bblock) {
7876 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7877 if (start_new_bblock == 2) {
7878 g_assert (ip == tblock->cil_code);
7880 GET_BBLOCK (cfg, tblock, ip);
7882 cfg->cbb->next_bb = tblock;
7884 start_new_bblock = 0;
7885 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7886 if (cfg->verbose_level > 3)
7887 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7888 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7892 g_slist_free (class_inits);
7895 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7896 link_bblock (cfg, cfg->cbb, tblock);
7897 if (sp != stack_start) {
7898 handle_stack_args (cfg, stack_start, sp - stack_start);
7900 CHECK_UNVERIFIABLE (cfg);
7902 cfg->cbb->next_bb = tblock;
7904 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7905 if (cfg->verbose_level > 3)
7906 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7907 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7910 g_slist_free (class_inits);
7915 if (skip_dead_blocks) {
7916 int ip_offset = ip - header->code;
7918 if (ip_offset == bb->end)
7922 int op_size = mono_opcode_size (ip, end);
7923 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7925 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7927 if (ip_offset + op_size == bb->end) {
7928 MONO_INST_NEW (cfg, ins, OP_NOP);
7929 MONO_ADD_INS (cfg->cbb, ins);
7930 start_new_bblock = 1;
7938 * Sequence points are points where the debugger can place a breakpoint.
7939 * Currently, we generate these automatically at points where the IL
7942 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7944 * Make methods interruptable at the beginning, and at the targets of
7945 * backward branches.
7946 * Also, do this at the start of every bblock in methods with clauses too,
7947 * to be able to handle instructions with imprecise control flow like
7949 * Backward branches are handled at the end of method-to-ir ().
7951 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7952 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7954 /* Avoid sequence points on empty IL like .volatile */
7955 // FIXME: Enable this
7956 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7957 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7958 if ((sp != stack_start) && !sym_seq_point)
7959 ins->flags |= MONO_INST_NONEMPTY_STACK;
7960 MONO_ADD_INS (cfg->cbb, ins);
7963 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7966 cfg->cbb->real_offset = cfg->real_offset;
7968 if ((cfg->method == method) && cfg->coverage_info) {
7969 guint32 cil_offset = ip - header->code;
7970 cfg->coverage_info->data [cil_offset].cil_code = ip;
7972 /* TODO: Use an increment here */
7973 #if defined(TARGET_X86)
7974 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7975 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7977 MONO_ADD_INS (cfg->cbb, ins);
7979 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7980 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7984 if (cfg->verbose_level > 3)
7985 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7989 if (seq_points && !sym_seq_points && sp != stack_start) {
7991 * The C# compiler uses these nops to notify the JIT that it should
7992 * insert seq points.
7994 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7995 MONO_ADD_INS (cfg->cbb, ins);
7997 if (cfg->keep_cil_nops)
7998 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8000 MONO_INST_NEW (cfg, ins, OP_NOP);
8002 MONO_ADD_INS (cfg->cbb, ins);
8005 if (should_insert_brekpoint (cfg->method)) {
8006 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8008 MONO_INST_NEW (cfg, ins, OP_NOP);
8011 MONO_ADD_INS (cfg->cbb, ins);
8017 CHECK_STACK_OVF (1);
8018 n = (*ip)-CEE_LDARG_0;
8020 EMIT_NEW_ARGLOAD (cfg, ins, n);
8028 CHECK_STACK_OVF (1);
8029 n = (*ip)-CEE_LDLOC_0;
8031 EMIT_NEW_LOCLOAD (cfg, ins, n);
8040 n = (*ip)-CEE_STLOC_0;
8043 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8045 emit_stloc_ir (cfg, sp, header, n);
8052 CHECK_STACK_OVF (1);
8055 EMIT_NEW_ARGLOAD (cfg, ins, n);
8061 CHECK_STACK_OVF (1);
8064 NEW_ARGLOADA (cfg, ins, n);
8065 MONO_ADD_INS (cfg->cbb, ins);
8075 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8077 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8082 CHECK_STACK_OVF (1);
8085 EMIT_NEW_LOCLOAD (cfg, ins, n);
8089 case CEE_LDLOCA_S: {
8090 unsigned char *tmp_ip;
8092 CHECK_STACK_OVF (1);
8093 CHECK_LOCAL (ip [1]);
8095 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8101 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8110 CHECK_LOCAL (ip [1]);
8111 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8113 emit_stloc_ir (cfg, sp, header, ip [1]);
8118 CHECK_STACK_OVF (1);
8119 EMIT_NEW_PCONST (cfg, ins, NULL);
8120 ins->type = STACK_OBJ;
8125 CHECK_STACK_OVF (1);
8126 EMIT_NEW_ICONST (cfg, ins, -1);
8139 CHECK_STACK_OVF (1);
8140 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8146 CHECK_STACK_OVF (1);
8148 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8154 CHECK_STACK_OVF (1);
8155 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8161 CHECK_STACK_OVF (1);
8162 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8163 ins->type = STACK_I8;
8164 ins->dreg = alloc_dreg (cfg, STACK_I8);
8166 ins->inst_l = (gint64)read64 (ip);
8167 MONO_ADD_INS (cfg->cbb, ins);
8173 gboolean use_aotconst = FALSE;
8175 #ifdef TARGET_POWERPC
8176 /* FIXME: Clean this up */
8177 if (cfg->compile_aot)
8178 use_aotconst = TRUE;
8181 /* FIXME: we should really allocate this only late in the compilation process */
8182 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8184 CHECK_STACK_OVF (1);
8190 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8192 dreg = alloc_freg (cfg);
8193 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8194 ins->type = cfg->r4_stack_type;
8196 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8197 ins->type = cfg->r4_stack_type;
8198 ins->dreg = alloc_dreg (cfg, STACK_R8);
8200 MONO_ADD_INS (cfg->cbb, ins);
8210 gboolean use_aotconst = FALSE;
8212 #ifdef TARGET_POWERPC
8213 /* FIXME: Clean this up */
8214 if (cfg->compile_aot)
8215 use_aotconst = TRUE;
8218 /* FIXME: we should really allocate this only late in the compilation process */
8219 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8221 CHECK_STACK_OVF (1);
8227 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8229 dreg = alloc_freg (cfg);
8230 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8231 ins->type = STACK_R8;
8233 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8234 ins->type = STACK_R8;
8235 ins->dreg = alloc_dreg (cfg, STACK_R8);
8237 MONO_ADD_INS (cfg->cbb, ins);
8246 MonoInst *temp, *store;
8248 CHECK_STACK_OVF (1);
8252 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8253 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8255 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8258 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8271 if (sp [0]->type == STACK_R8)
8272 /* we need to pop the value from the x86 FP stack */
8273 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8278 MonoMethodSignature *fsig;
8281 INLINE_FAILURE ("jmp");
8282 GSHAREDVT_FAILURE (*ip);
8285 if (stack_start != sp)
8287 token = read32 (ip + 1);
8288 /* FIXME: check the signature matches */
8289 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8292 if (cfg->gshared && mono_method_check_context_used (cmethod))
8293 GENERIC_SHARING_FAILURE (CEE_JMP);
8295 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8297 fsig = mono_method_signature (cmethod);
8298 n = fsig->param_count + fsig->hasthis;
8299 if (cfg->llvm_only) {
8302 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8303 for (i = 0; i < n; ++i)
8304 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8305 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8307 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8308 * have to emit a normal return since llvm expects it.
8311 emit_setret (cfg, ins);
8312 MONO_INST_NEW (cfg, ins, OP_BR);
8313 ins->inst_target_bb = end_bblock;
8314 MONO_ADD_INS (cfg->cbb, ins);
8315 link_bblock (cfg, cfg->cbb, end_bblock);
8318 } else if (cfg->backend->have_op_tail_call) {
8319 /* Handle tail calls similarly to calls */
8322 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8323 call->method = cmethod;
8324 call->tail_call = TRUE;
8325 call->signature = mono_method_signature (cmethod);
8326 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8327 call->inst.inst_p0 = cmethod;
8328 for (i = 0; i < n; ++i)
8329 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8331 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8332 call->vret_var = cfg->vret_addr;
8334 mono_arch_emit_call (cfg, call);
8335 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8336 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8338 for (i = 0; i < num_args; ++i)
8339 /* Prevent arguments from being optimized away */
8340 arg_array [i]->flags |= MONO_INST_VOLATILE;
8342 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8343 ins = (MonoInst*)call;
8344 ins->inst_p0 = cmethod;
8345 MONO_ADD_INS (cfg->cbb, ins);
8349 start_new_bblock = 1;
8354 MonoMethodSignature *fsig;
8357 token = read32 (ip + 1);
8361 //GSHAREDVT_FAILURE (*ip);
8366 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8369 if (method->dynamic && fsig->pinvoke) {
8373 * This is a call through a function pointer using a pinvoke
8374 * signature. Have to create a wrapper and call that instead.
8375 * FIXME: This is very slow, need to create a wrapper at JIT time
8376 * instead based on the signature.
8378 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8379 EMIT_NEW_PCONST (cfg, args [1], fsig);
8381 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8384 n = fsig->param_count + fsig->hasthis;
8388 //g_assert (!virtual_ || fsig->hasthis);
8392 inline_costs += 10 * num_calls++;
8395 * Making generic calls out of gsharedvt methods.
8396 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8397 * patching gshared method addresses into a gsharedvt method.
8399 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8401 * We pass the address to the gsharedvt trampoline in the rgctx reg
8403 MonoInst *callee = addr;
8405 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8407 GSHAREDVT_FAILURE (*ip);
8411 GSHAREDVT_FAILURE (*ip);
8413 addr = emit_get_rgctx_sig (cfg, context_used,
8414 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8415 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8419 /* Prevent inlining of methods with indirect calls */
8420 INLINE_FAILURE ("indirect call");
8422 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8423 MonoJumpInfoType info_type;
8427 * Instead of emitting an indirect call, emit a direct call
8428 * with the contents of the aotconst as the patch info.
8430 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8431 info_type = (MonoJumpInfoType)addr->inst_c1;
8432 info_data = addr->inst_p0;
8434 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8435 info_data = addr->inst_right->inst_left;
8438 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8439 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8442 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8443 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8448 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8452 /* End of call, INS should contain the result of the call, if any */
8454 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8456 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8459 CHECK_CFG_EXCEPTION;
8463 constrained_class = NULL;
8467 case CEE_CALLVIRT: {
8468 MonoInst *addr = NULL;
8469 MonoMethodSignature *fsig = NULL;
8471 int virtual_ = *ip == CEE_CALLVIRT;
8472 gboolean pass_imt_from_rgctx = FALSE;
8473 MonoInst *imt_arg = NULL;
8474 MonoInst *keep_this_alive = NULL;
8475 gboolean pass_vtable = FALSE;
8476 gboolean pass_mrgctx = FALSE;
8477 MonoInst *vtable_arg = NULL;
8478 gboolean check_this = FALSE;
8479 gboolean supported_tail_call = FALSE;
8480 gboolean tail_call = FALSE;
8481 gboolean need_seq_point = FALSE;
8482 guint32 call_opcode = *ip;
8483 gboolean emit_widen = TRUE;
8484 gboolean push_res = TRUE;
8485 gboolean skip_ret = FALSE;
8486 gboolean delegate_invoke = FALSE;
8487 gboolean direct_icall = FALSE;
8488 gboolean constrained_partial_call = FALSE;
8489 MonoMethod *cil_method;
8492 token = read32 (ip + 1);
8496 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8499 cil_method = cmethod;
8501 if (constrained_class) {
8502 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8503 if (!mini_is_gsharedvt_klass (constrained_class)) {
8504 g_assert (!cmethod->klass->valuetype);
8505 if (!mini_type_is_reference (&constrained_class->byval_arg))
8506 constrained_partial_call = TRUE;
8510 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8511 if (cfg->verbose_level > 2)
8512 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8513 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8514 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8516 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8520 if (cfg->verbose_level > 2)
8521 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8523 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8525 * This is needed since get_method_constrained can't find
8526 * the method in klass representing a type var.
8527 * The type var is guaranteed to be a reference type in this
8530 if (!mini_is_gsharedvt_klass (constrained_class))
8531 g_assert (!cmethod->klass->valuetype);
8533 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8539 if (!dont_verify && !cfg->skip_visibility) {
8540 MonoMethod *target_method = cil_method;
8541 if (method->is_inflated) {
8542 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8545 if (!mono_method_can_access_method (method_definition, target_method) &&
8546 !mono_method_can_access_method (method, cil_method))
8547 emit_method_access_failure (cfg, method, cil_method);
8550 if (mono_security_core_clr_enabled ())
8551 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8553 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8554 /* MS.NET seems to silently convert this to a callvirt */
8559 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8560 * converts to a callvirt.
8562 * tests/bug-515884.il is an example of this behavior
8564 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8565 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8566 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8570 if (!cmethod->klass->inited)
8571 if (!mono_class_init (cmethod->klass))
8572 TYPE_LOAD_ERROR (cmethod->klass);
8574 fsig = mono_method_signature (cmethod);
8577 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8578 mini_class_is_system_array (cmethod->klass)) {
8579 array_rank = cmethod->klass->rank;
8580 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8581 direct_icall = TRUE;
8582 } else if (fsig->pinvoke) {
8583 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8584 fsig = mono_method_signature (wrapper);
8585 } else if (constrained_class) {
8587 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8591 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8592 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8594 /* See code below */
8595 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8596 MonoBasicBlock *tbb;
8598 GET_BBLOCK (cfg, tbb, ip + 5);
8599 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8601 * We want to extend the try block to cover the call, but we can't do it if the
8602 * call is made directly since it's followed by an exception check.
8604 direct_icall = FALSE;
8608 mono_save_token_info (cfg, image, token, cil_method);
8610 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8611 need_seq_point = TRUE;
8613 /* Don't support calls made using type arguments for now */
8615 if (cfg->gsharedvt) {
8616 if (mini_is_gsharedvt_signature (fsig))
8617 GSHAREDVT_FAILURE (*ip);
8621 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8622 g_assert_not_reached ();
8624 n = fsig->param_count + fsig->hasthis;
8626 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8630 g_assert (!mono_method_check_context_used (cmethod));
8634 //g_assert (!virtual_ || fsig->hasthis);
8639 * We have the `constrained.' prefix opcode.
8641 if (constrained_class) {
8642 if (mini_is_gsharedvt_klass (constrained_class)) {
8643 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8644 /* The 'Own method' case below */
8645 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8646 /* 'The type parameter is instantiated as a reference type' case below. */
8648 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8649 CHECK_CFG_EXCEPTION;
8655 if (constrained_partial_call) {
8656 gboolean need_box = TRUE;
8659 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8660 * called method is not known at compile time either. The called method could end up being
8661 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8662 * to box the receiver.
8663 * A simple solution would be to box always and make a normal virtual call, but that would
8664 * be bad performance wise.
8666 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8668 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8673 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8674 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8675 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8676 ins->klass = constrained_class;
8677 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8678 CHECK_CFG_EXCEPTION;
8679 } else if (need_box) {
8681 MonoBasicBlock *is_ref_bb, *end_bb;
8682 MonoInst *nonbox_call;
8685 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8687 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8688 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8690 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8692 NEW_BBLOCK (cfg, is_ref_bb);
8693 NEW_BBLOCK (cfg, end_bb);
8695 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8700 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8705 MONO_START_BB (cfg, is_ref_bb);
8706 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8707 ins->klass = constrained_class;
8708 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8709 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8713 MONO_START_BB (cfg, end_bb);
8716 nonbox_call->dreg = ins->dreg;
8719 g_assert (mono_class_is_interface (cmethod->klass));
8720 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8721 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8724 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8726 * The type parameter is instantiated as a valuetype,
8727 * but that type doesn't override the method we're
8728 * calling, so we need to box `this'.
8730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8731 ins->klass = constrained_class;
8732 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8733 CHECK_CFG_EXCEPTION;
8734 } else if (!constrained_class->valuetype) {
8735 int dreg = alloc_ireg_ref (cfg);
8738 * The type parameter is instantiated as a reference
8739 * type. We have a managed pointer on the stack, so
8740 * we need to dereference it here.
8742 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8743 ins->type = STACK_OBJ;
8746 if (cmethod->klass->valuetype) {
8749 /* Interface method */
8752 mono_class_setup_vtable (constrained_class);
8753 CHECK_TYPELOAD (constrained_class);
8754 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8756 TYPE_LOAD_ERROR (constrained_class);
8757 slot = mono_method_get_vtable_slot (cmethod);
8759 TYPE_LOAD_ERROR (cmethod->klass);
8760 cmethod = constrained_class->vtable [ioffset + slot];
8762 if (cmethod->klass == mono_defaults.enum_class) {
8763 /* Enum implements some interfaces, so treat this as the first case */
8764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8765 ins->klass = constrained_class;
8766 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8767 CHECK_CFG_EXCEPTION;
8772 constrained_class = NULL;
8775 if (check_call_signature (cfg, fsig, sp))
8778 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8779 delegate_invoke = TRUE;
8781 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8782 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8783 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8791 * If the callee is a shared method, then its static cctor
8792 * might not get called after the call was patched.
8794 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8795 emit_class_init (cfg, cmethod->klass);
8796 CHECK_TYPELOAD (cmethod->klass);
8799 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8802 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8804 context_used = mini_method_check_context_used (cfg, cmethod);
8806 if (context_used && mono_class_is_interface (cmethod->klass)) {
8807 /* Generic method interface
8808 calls are resolved via a
8809 helper function and don't
8811 if (!cmethod_context || !cmethod_context->method_inst)
8812 pass_imt_from_rgctx = TRUE;
8816 * If a shared method calls another
8817 * shared method then the caller must
8818 * have a generic sharing context
8819 * because the magic trampoline
8820 * requires it. FIXME: We shouldn't
8821 * have to force the vtable/mrgctx
8822 * variable here. Instead there
8823 * should be a flag in the cfg to
8824 * request a generic sharing context.
8827 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8828 mono_get_vtable_var (cfg);
8833 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8835 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8837 CHECK_TYPELOAD (cmethod->klass);
8838 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8843 g_assert (!vtable_arg);
8845 if (!cfg->compile_aot) {
8847 * emit_get_rgctx_method () calls mono_class_vtable () so check
8848 * for type load errors before.
8850 mono_class_setup_vtable (cmethod->klass);
8851 CHECK_TYPELOAD (cmethod->klass);
8854 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8856 /* !marshalbyref is needed to properly handle generic methods + remoting */
8857 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8858 MONO_METHOD_IS_FINAL (cmethod)) &&
8859 !mono_class_is_marshalbyref (cmethod->klass)) {
8866 if (pass_imt_from_rgctx) {
8867 g_assert (!pass_vtable);
8869 imt_arg = emit_get_rgctx_method (cfg, context_used,
8870 cmethod, MONO_RGCTX_INFO_METHOD);
8874 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8876 /* Calling virtual generic methods */
8877 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8878 !(MONO_METHOD_IS_FINAL (cmethod) &&
8879 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8880 fsig->generic_param_count &&
8881 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8883 MonoInst *this_temp, *this_arg_temp, *store;
8884 MonoInst *iargs [4];
8886 g_assert (fsig->is_inflated);
8888 /* Prevent inlining of methods that contain indirect calls */
8889 INLINE_FAILURE ("virtual generic call");
8891 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8892 GSHAREDVT_FAILURE (*ip);
8894 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8895 g_assert (!imt_arg);
8897 g_assert (cmethod->is_inflated);
8898 imt_arg = emit_get_rgctx_method (cfg, context_used,
8899 cmethod, MONO_RGCTX_INFO_METHOD);
8900 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8902 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8903 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8904 MONO_ADD_INS (cfg->cbb, store);
8906 /* FIXME: This should be a managed pointer */
8907 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8909 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8910 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8911 cmethod, MONO_RGCTX_INFO_METHOD);
8912 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8913 addr = mono_emit_jit_icall (cfg,
8914 mono_helper_compile_generic_method, iargs);
8916 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8918 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8925 * Implement a workaround for the inherent races involved in locking:
8931 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8932 * try block, the Exit () won't be executed, see:
8933 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8934 * To work around this, we extend such try blocks to include the last x bytes
8935 * of the Monitor.Enter () call.
8937 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8938 MonoBasicBlock *tbb;
8940 GET_BBLOCK (cfg, tbb, ip + 5);
8942 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8943 * from Monitor.Enter like ArgumentNullException.
8945 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8946 /* Mark this bblock as needing to be extended */
8947 tbb->extend_try_block = TRUE;
8951 /* Conversion to a JIT intrinsic */
8952 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8953 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8954 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8962 if ((cfg->opt & MONO_OPT_INLINE) &&
8963 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8964 mono_method_check_inlining (cfg, cmethod)) {
8966 gboolean always = FALSE;
8968 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8969 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8970 /* Prevent inlining of methods that call wrappers */
8971 INLINE_FAILURE ("wrapper call");
8972 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8976 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8978 cfg->real_offset += 5;
8980 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8981 /* *sp is already set by inline_method */
8986 inline_costs += costs;
8992 /* Tail recursion elimination */
8993 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8994 gboolean has_vtargs = FALSE;
8997 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8998 INLINE_FAILURE ("tail call");
9000 /* keep it simple */
9001 for (i = fsig->param_count - 1; i >= 0; i--) {
9002 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9007 if (need_seq_point) {
9008 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9009 need_seq_point = FALSE;
9011 for (i = 0; i < n; ++i)
9012 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9013 MONO_INST_NEW (cfg, ins, OP_BR);
9014 MONO_ADD_INS (cfg->cbb, ins);
9015 tblock = start_bblock->out_bb [0];
9016 link_bblock (cfg, cfg->cbb, tblock);
9017 ins->inst_target_bb = tblock;
9018 start_new_bblock = 1;
9020 /* skip the CEE_RET, too */
9021 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9028 inline_costs += 10 * num_calls++;
9031 * Synchronized wrappers.
9032 * It's hard to determine where to replace a method with its synchronized
9033 * wrapper without causing an infinite recursion. The current solution is
9034 * to add the synchronized wrapper in the trampolines, and to
9035 * change the called method to a dummy wrapper, and resolve that wrapper
9036 * to the real method in mono_jit_compile_method ().
9038 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9039 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9040 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9041 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9045 * Making generic calls out of gsharedvt methods.
9046 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9047 * patching gshared method addresses into a gsharedvt method.
9049 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9050 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9051 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9052 MonoRgctxInfoType info_type;
9055 //if (mono_class_is_interface (cmethod->klass))
9056 //GSHAREDVT_FAILURE (*ip);
9057 // disable for possible remoting calls
9058 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9059 GSHAREDVT_FAILURE (*ip);
9060 if (fsig->generic_param_count) {
9061 /* virtual generic call */
9062 g_assert (!imt_arg);
9063 /* Same as the virtual generic case above */
9064 imt_arg = emit_get_rgctx_method (cfg, context_used,
9065 cmethod, MONO_RGCTX_INFO_METHOD);
9066 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9068 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9069 /* This can happen when we call a fully instantiated iface method */
9070 imt_arg = emit_get_rgctx_method (cfg, context_used,
9071 cmethod, MONO_RGCTX_INFO_METHOD);
9076 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9077 keep_this_alive = sp [0];
9079 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9080 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9082 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9083 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9085 if (cfg->llvm_only) {
9086 // FIXME: Avoid initializing vtable_arg
9087 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9089 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9094 /* Generic sharing */
9097 * Use this if the callee is gsharedvt sharable too, since
9098 * at runtime we might find an instantiation so the call cannot
9099 * be patched (the 'no_patch' code path in mini-trampolines.c).
9101 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9102 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9103 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9104 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9105 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9106 INLINE_FAILURE ("gshared");
9108 g_assert (cfg->gshared && cmethod);
9112 * We are compiling a call to a
9113 * generic method from shared code,
9114 * which means that we have to look up
9115 * the method in the rgctx and do an
9119 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9121 if (cfg->llvm_only) {
9122 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9123 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9125 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9126 // FIXME: Avoid initializing imt_arg/vtable_arg
9127 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9129 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9130 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9135 /* Direct calls to icalls */
9137 MonoMethod *wrapper;
9140 /* Inline the wrapper */
9141 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9143 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9144 g_assert (costs > 0);
9145 cfg->real_offset += 5;
9147 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9148 /* *sp is already set by inline_method */
9153 inline_costs += costs;
9162 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9163 MonoInst *val = sp [fsig->param_count];
9165 if (val->type == STACK_OBJ) {
9166 MonoInst *iargs [2];
9171 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9174 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9175 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9176 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9177 emit_write_barrier (cfg, addr, val);
9178 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9179 GSHAREDVT_FAILURE (*ip);
9180 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9181 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9183 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9184 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9185 if (!cmethod->klass->element_class->valuetype && !readonly)
9186 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9187 CHECK_TYPELOAD (cmethod->klass);
9190 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9193 g_assert_not_reached ();
9200 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9204 /* Tail prefix / tail call optimization */
9206 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9207 /* FIXME: runtime generic context pointer for jumps? */
9208 /* FIXME: handle this for generic sharing eventually */
9209 if ((ins_flag & MONO_INST_TAILCALL) &&
9210 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9211 supported_tail_call = TRUE;
9213 if (supported_tail_call) {
9216 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9217 INLINE_FAILURE ("tail call");
9219 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9221 if (cfg->backend->have_op_tail_call) {
9222 /* Handle tail calls similarly to normal calls */
9225 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9227 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9228 call->tail_call = TRUE;
9229 call->method = cmethod;
9230 call->signature = mono_method_signature (cmethod);
9233 * We implement tail calls by storing the actual arguments into the
9234 * argument variables, then emitting a CEE_JMP.
9236 for (i = 0; i < n; ++i) {
9237 /* Prevent argument from being register allocated */
9238 arg_array [i]->flags |= MONO_INST_VOLATILE;
9239 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9241 ins = (MonoInst*)call;
9242 ins->inst_p0 = cmethod;
9243 ins->inst_p1 = arg_array [0];
9244 MONO_ADD_INS (cfg->cbb, ins);
9245 link_bblock (cfg, cfg->cbb, end_bblock);
9246 start_new_bblock = 1;
9248 // FIXME: Eliminate unreachable epilogs
9251 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9252 * only reachable from this call.
9254 GET_BBLOCK (cfg, tblock, ip + 5);
9255 if (tblock == cfg->cbb || tblock->in_count == 0)
9264 * Virtual calls in llvm-only mode.
9266 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9267 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9272 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9273 INLINE_FAILURE ("call");
9274 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9275 imt_arg, vtable_arg);
9277 if (tail_call && !cfg->llvm_only) {
9278 link_bblock (cfg, cfg->cbb, end_bblock);
9279 start_new_bblock = 1;
9281 // FIXME: Eliminate unreachable epilogs
9284 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9285 * only reachable from this call.
9287 GET_BBLOCK (cfg, tblock, ip + 5);
9288 if (tblock == cfg->cbb || tblock->in_count == 0)
9295 /* End of call, INS should contain the result of the call, if any */
9297 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9300 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9305 if (keep_this_alive) {
9306 MonoInst *dummy_use;
9308 /* See mono_emit_method_call_full () */
9309 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9312 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9314 * Clang can convert these calls to tail calls which screw up the stack
9315 * walk. This happens even when the -fno-optimize-sibling-calls
9316 * option is passed to clang.
9317 * Work around this by emitting a dummy call.
9319 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9322 CHECK_CFG_EXCEPTION;
9326 g_assert (*ip == CEE_RET);
9330 constrained_class = NULL;
9332 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9336 if (cfg->method != method) {
9337 /* return from inlined method */
9339 * If in_count == 0, that means the ret is unreachable due to
9340 * being preceded by a throw. In that case, inline_method () will
9341 * handle setting the return value
9342 * (test case: test_0_inline_throw ()).
9344 if (return_var && cfg->cbb->in_count) {
9345 MonoType *ret_type = mono_method_signature (method)->ret;
9351 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9354 //g_assert (returnvar != -1);
9355 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9356 cfg->ret_var_set = TRUE;
9359 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9361 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9365 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9367 if (seq_points && !sym_seq_points) {
9369 * Place a seq point here too even though the IL stack is not
9370 * empty, so a step over on
9373 * will work correctly.
9375 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9376 MONO_ADD_INS (cfg->cbb, ins);
9379 g_assert (!return_var);
9383 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9386 emit_setret (cfg, *sp);
9389 if (sp != stack_start)
9391 MONO_INST_NEW (cfg, ins, OP_BR);
9393 ins->inst_target_bb = end_bblock;
9394 MONO_ADD_INS (cfg->cbb, ins);
9395 link_bblock (cfg, cfg->cbb, end_bblock);
9396 start_new_bblock = 1;
9400 MONO_INST_NEW (cfg, ins, OP_BR);
9402 target = ip + 1 + (signed char)(*ip);
9404 GET_BBLOCK (cfg, tblock, target);
9405 link_bblock (cfg, cfg->cbb, tblock);
9406 ins->inst_target_bb = tblock;
9407 if (sp != stack_start) {
9408 handle_stack_args (cfg, stack_start, sp - stack_start);
9410 CHECK_UNVERIFIABLE (cfg);
9412 MONO_ADD_INS (cfg->cbb, ins);
9413 start_new_bblock = 1;
9414 inline_costs += BRANCH_COST;
9428 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9430 target = ip + 1 + *(signed char*)ip;
9436 inline_costs += BRANCH_COST;
9440 MONO_INST_NEW (cfg, ins, OP_BR);
9443 target = ip + 4 + (gint32)read32(ip);
9445 GET_BBLOCK (cfg, tblock, target);
9446 link_bblock (cfg, cfg->cbb, tblock);
9447 ins->inst_target_bb = tblock;
9448 if (sp != stack_start) {
9449 handle_stack_args (cfg, stack_start, sp - stack_start);
9451 CHECK_UNVERIFIABLE (cfg);
9454 MONO_ADD_INS (cfg->cbb, ins);
9456 start_new_bblock = 1;
9457 inline_costs += BRANCH_COST;
9464 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9465 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9466 guint32 opsize = is_short ? 1 : 4;
9468 CHECK_OPSIZE (opsize);
9470 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9473 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9478 GET_BBLOCK (cfg, tblock, target);
9479 link_bblock (cfg, cfg->cbb, tblock);
9480 GET_BBLOCK (cfg, tblock, ip);
9481 link_bblock (cfg, cfg->cbb, tblock);
9483 if (sp != stack_start) {
9484 handle_stack_args (cfg, stack_start, sp - stack_start);
9485 CHECK_UNVERIFIABLE (cfg);
9488 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9489 cmp->sreg1 = sp [0]->dreg;
9490 type_from_op (cfg, cmp, sp [0], NULL);
9493 #if SIZEOF_REGISTER == 4
9494 if (cmp->opcode == OP_LCOMPARE_IMM) {
9495 /* Convert it to OP_LCOMPARE */
9496 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9497 ins->type = STACK_I8;
9498 ins->dreg = alloc_dreg (cfg, STACK_I8);
9500 MONO_ADD_INS (cfg->cbb, ins);
9501 cmp->opcode = OP_LCOMPARE;
9502 cmp->sreg2 = ins->dreg;
9505 MONO_ADD_INS (cfg->cbb, cmp);
9507 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9508 type_from_op (cfg, ins, sp [0], NULL);
9509 MONO_ADD_INS (cfg->cbb, ins);
9510 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9511 GET_BBLOCK (cfg, tblock, target);
9512 ins->inst_true_bb = tblock;
9513 GET_BBLOCK (cfg, tblock, ip);
9514 ins->inst_false_bb = tblock;
9515 start_new_bblock = 2;
9518 inline_costs += BRANCH_COST;
9533 MONO_INST_NEW (cfg, ins, *ip);
9535 target = ip + 4 + (gint32)read32(ip);
9541 inline_costs += BRANCH_COST;
9545 MonoBasicBlock **targets;
9546 MonoBasicBlock *default_bblock;
9547 MonoJumpInfoBBTable *table;
9548 int offset_reg = alloc_preg (cfg);
9549 int target_reg = alloc_preg (cfg);
9550 int table_reg = alloc_preg (cfg);
9551 int sum_reg = alloc_preg (cfg);
9552 gboolean use_op_switch;
9556 n = read32 (ip + 1);
9559 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9563 CHECK_OPSIZE (n * sizeof (guint32));
9564 target = ip + n * sizeof (guint32);
9566 GET_BBLOCK (cfg, default_bblock, target);
9567 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9569 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9570 for (i = 0; i < n; ++i) {
9571 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9572 targets [i] = tblock;
9573 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9577 if (sp != stack_start) {
9579 * Link the current bb with the targets as well, so handle_stack_args
9580 * will set their in_stack correctly.
9582 link_bblock (cfg, cfg->cbb, default_bblock);
9583 for (i = 0; i < n; ++i)
9584 link_bblock (cfg, cfg->cbb, targets [i]);
9586 handle_stack_args (cfg, stack_start, sp - stack_start);
9588 CHECK_UNVERIFIABLE (cfg);
9590 /* Undo the links */
9591 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9592 for (i = 0; i < n; ++i)
9593 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9599 for (i = 0; i < n; ++i)
9600 link_bblock (cfg, cfg->cbb, targets [i]);
9602 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9603 table->table = targets;
9604 table->table_size = n;
9606 use_op_switch = FALSE;
9608 /* ARM implements SWITCH statements differently */
9609 /* FIXME: Make it use the generic implementation */
9610 if (!cfg->compile_aot)
9611 use_op_switch = TRUE;
9614 if (COMPILE_LLVM (cfg))
9615 use_op_switch = TRUE;
9617 cfg->cbb->has_jump_table = 1;
9619 if (use_op_switch) {
9620 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9621 ins->sreg1 = src1->dreg;
9622 ins->inst_p0 = table;
9623 ins->inst_many_bb = targets;
9624 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9625 MONO_ADD_INS (cfg->cbb, ins);
9627 if (sizeof (gpointer) == 8)
9628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9632 #if SIZEOF_REGISTER == 8
9633 /* The upper word might not be zero, and we add it to a 64 bit address later */
9634 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9637 if (cfg->compile_aot) {
9638 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9640 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9641 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9642 ins->inst_p0 = table;
9643 ins->dreg = table_reg;
9644 MONO_ADD_INS (cfg->cbb, ins);
9647 /* FIXME: Use load_memindex */
9648 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9650 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9652 start_new_bblock = 1;
9653 inline_costs += (BRANCH_COST * 2);
9673 dreg = alloc_freg (cfg);
9676 dreg = alloc_lreg (cfg);
9679 dreg = alloc_ireg_ref (cfg);
9682 dreg = alloc_preg (cfg);
9685 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9686 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9687 if (*ip == CEE_LDIND_R4)
9688 ins->type = cfg->r4_stack_type;
9689 ins->flags |= ins_flag;
9690 MONO_ADD_INS (cfg->cbb, ins);
9692 if (ins_flag & MONO_INST_VOLATILE) {
9693 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9694 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9710 if (ins_flag & MONO_INST_VOLATILE) {
9711 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9712 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9715 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9716 ins->flags |= ins_flag;
9719 MONO_ADD_INS (cfg->cbb, ins);
9721 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9722 emit_write_barrier (cfg, sp [0], sp [1]);
9731 MONO_INST_NEW (cfg, ins, (*ip));
9733 ins->sreg1 = sp [0]->dreg;
9734 ins->sreg2 = sp [1]->dreg;
9735 type_from_op (cfg, ins, sp [0], sp [1]);
9737 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9739 /* Use the immediate opcodes if possible */
9740 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9741 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9742 if (imm_opcode != -1) {
9743 ins->opcode = imm_opcode;
9744 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9747 NULLIFY_INS (sp [1]);
9751 MONO_ADD_INS ((cfg)->cbb, (ins));
9753 *sp++ = mono_decompose_opcode (cfg, ins);
9770 MONO_INST_NEW (cfg, ins, (*ip));
9772 ins->sreg1 = sp [0]->dreg;
9773 ins->sreg2 = sp [1]->dreg;
9774 type_from_op (cfg, ins, sp [0], sp [1]);
9776 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9777 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9779 /* FIXME: Pass opcode to is_inst_imm */
9781 /* Use the immediate opcodes if possible */
9782 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9783 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9784 if (imm_opcode != -1) {
9785 ins->opcode = imm_opcode;
9786 if (sp [1]->opcode == OP_I8CONST) {
9787 #if SIZEOF_REGISTER == 8
9788 ins->inst_imm = sp [1]->inst_l;
9790 ins->inst_ls_word = sp [1]->inst_ls_word;
9791 ins->inst_ms_word = sp [1]->inst_ms_word;
9795 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9798 /* Might be followed by an instruction added by add_widen_op */
9799 if (sp [1]->next == NULL)
9800 NULLIFY_INS (sp [1]);
9803 MONO_ADD_INS ((cfg)->cbb, (ins));
9805 *sp++ = mono_decompose_opcode (cfg, ins);
9818 case CEE_CONV_OVF_I8:
9819 case CEE_CONV_OVF_U8:
9823 /* Special case this earlier so we have long constants in the IR */
9824 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9825 int data = sp [-1]->inst_c0;
9826 sp [-1]->opcode = OP_I8CONST;
9827 sp [-1]->type = STACK_I8;
9828 #if SIZEOF_REGISTER == 8
9829 if ((*ip) == CEE_CONV_U8)
9830 sp [-1]->inst_c0 = (guint32)data;
9832 sp [-1]->inst_c0 = data;
9834 sp [-1]->inst_ls_word = data;
9835 if ((*ip) == CEE_CONV_U8)
9836 sp [-1]->inst_ms_word = 0;
9838 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9840 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9847 case CEE_CONV_OVF_I4:
9848 case CEE_CONV_OVF_I1:
9849 case CEE_CONV_OVF_I2:
9850 case CEE_CONV_OVF_I:
9851 case CEE_CONV_OVF_U:
9854 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9855 ADD_UNOP (CEE_CONV_OVF_I8);
9862 case CEE_CONV_OVF_U1:
9863 case CEE_CONV_OVF_U2:
9864 case CEE_CONV_OVF_U4:
9867 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9868 ADD_UNOP (CEE_CONV_OVF_U8);
9875 case CEE_CONV_OVF_I1_UN:
9876 case CEE_CONV_OVF_I2_UN:
9877 case CEE_CONV_OVF_I4_UN:
9878 case CEE_CONV_OVF_I8_UN:
9879 case CEE_CONV_OVF_U1_UN:
9880 case CEE_CONV_OVF_U2_UN:
9881 case CEE_CONV_OVF_U4_UN:
9882 case CEE_CONV_OVF_U8_UN:
9883 case CEE_CONV_OVF_I_UN:
9884 case CEE_CONV_OVF_U_UN:
9891 CHECK_CFG_EXCEPTION;
9895 case CEE_ADD_OVF_UN:
9897 case CEE_MUL_OVF_UN:
9899 case CEE_SUB_OVF_UN:
9905 GSHAREDVT_FAILURE (*ip);
9908 token = read32 (ip + 1);
9909 klass = mini_get_class (method, token, generic_context);
9910 CHECK_TYPELOAD (klass);
9912 if (generic_class_is_reference_type (cfg, klass)) {
9913 MonoInst *store, *load;
9914 int dreg = alloc_ireg_ref (cfg);
9916 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9917 load->flags |= ins_flag;
9918 MONO_ADD_INS (cfg->cbb, load);
9920 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9921 store->flags |= ins_flag;
9922 MONO_ADD_INS (cfg->cbb, store);
9924 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9925 emit_write_barrier (cfg, sp [0], sp [1]);
9927 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9939 token = read32 (ip + 1);
9940 klass = mini_get_class (method, token, generic_context);
9941 CHECK_TYPELOAD (klass);
9943 /* Optimize the common ldobj+stloc combination */
9953 loc_index = ip [5] - CEE_STLOC_0;
9960 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9961 CHECK_LOCAL (loc_index);
9963 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9964 ins->dreg = cfg->locals [loc_index]->dreg;
9965 ins->flags |= ins_flag;
9968 if (ins_flag & MONO_INST_VOLATILE) {
9969 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9970 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9976 /* Optimize the ldobj+stobj combination */
9977 /* The reference case ends up being a load+store anyway */
9978 /* Skip this if the operation is volatile. */
9979 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9984 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9992 ins->flags |= ins_flag;
9995 if (ins_flag & MONO_INST_VOLATILE) {
9996 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9997 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10006 CHECK_STACK_OVF (1);
10008 n = read32 (ip + 1);
10010 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10011 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10012 ins->type = STACK_OBJ;
10015 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10016 MonoInst *iargs [1];
10017 char *str = (char *)mono_method_get_wrapper_data (method, n);
10019 if (cfg->compile_aot)
10020 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10022 EMIT_NEW_PCONST (cfg, iargs [0], str);
10023 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10025 if (cfg->opt & MONO_OPT_SHARED) {
10026 MonoInst *iargs [3];
10028 if (cfg->compile_aot) {
10029 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10031 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10032 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10033 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10034 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10035 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10038 if (cfg->cbb->out_of_line) {
10039 MonoInst *iargs [2];
10041 if (image == mono_defaults.corlib) {
10043 * Avoid relocations in AOT and save some space by using a
10044 * version of helper_ldstr specialized to mscorlib.
10046 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10047 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10049 /* Avoid creating the string object */
10050 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10051 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10052 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10056 if (cfg->compile_aot) {
10057 NEW_LDSTRCONST (cfg, ins, image, n);
10059 MONO_ADD_INS (cfg->cbb, ins);
10062 NEW_PCONST (cfg, ins, NULL);
10063 ins->type = STACK_OBJ;
10064 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10068 OUT_OF_MEMORY_FAILURE;
10071 MONO_ADD_INS (cfg->cbb, ins);
10080 MonoInst *iargs [2];
10081 MonoMethodSignature *fsig;
10084 MonoInst *vtable_arg = NULL;
10087 token = read32 (ip + 1);
10088 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10091 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10094 mono_save_token_info (cfg, image, token, cmethod);
10096 if (!mono_class_init (cmethod->klass))
10097 TYPE_LOAD_ERROR (cmethod->klass);
10099 context_used = mini_method_check_context_used (cfg, cmethod);
10101 if (mono_security_core_clr_enabled ())
10102 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10104 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10105 emit_class_init (cfg, cmethod->klass);
10106 CHECK_TYPELOAD (cmethod->klass);
10110 if (cfg->gsharedvt) {
10111 if (mini_is_gsharedvt_variable_signature (sig))
10112 GSHAREDVT_FAILURE (*ip);
10116 n = fsig->param_count;
10120 * Generate smaller code for the common newobj <exception> instruction in
10121 * argument checking code.
10123 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10124 is_exception_class (cmethod->klass) && n <= 2 &&
10125 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10126 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10127 MonoInst *iargs [3];
10131 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10134 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10137 iargs [1] = sp [0];
10138 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10141 iargs [1] = sp [0];
10142 iargs [2] = sp [1];
10143 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10146 g_assert_not_reached ();
10154 /* move the args to allow room for 'this' in the first position */
10160 /* check_call_signature () requires sp[0] to be set */
10161 this_ins.type = STACK_OBJ;
10162 sp [0] = &this_ins;
10163 if (check_call_signature (cfg, fsig, sp))
10168 if (mini_class_is_system_array (cmethod->klass)) {
10169 *sp = emit_get_rgctx_method (cfg, context_used,
10170 cmethod, MONO_RGCTX_INFO_METHOD);
10172 /* Avoid varargs in the common case */
10173 if (fsig->param_count == 1)
10174 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10175 else if (fsig->param_count == 2)
10176 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10177 else if (fsig->param_count == 3)
10178 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10179 else if (fsig->param_count == 4)
10180 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10182 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10183 } else if (cmethod->string_ctor) {
10184 g_assert (!context_used);
10185 g_assert (!vtable_arg);
10186 /* we simply pass a null pointer */
10187 EMIT_NEW_PCONST (cfg, *sp, NULL);
10188 /* now call the string ctor */
10189 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10191 if (cmethod->klass->valuetype) {
10192 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10193 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10194 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10199 * The code generated by mini_emit_virtual_call () expects
10200 * iargs [0] to be a boxed instance, but luckily the vcall
10201 * will be transformed into a normal call there.
10203 } else if (context_used) {
10204 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10207 MonoVTable *vtable = NULL;
10209 if (!cfg->compile_aot)
10210 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10211 CHECK_TYPELOAD (cmethod->klass);
10214 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10215 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10216 * As a workaround, we call class cctors before allocating objects.
10218 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10219 emit_class_init (cfg, cmethod->klass);
10220 if (cfg->verbose_level > 2)
10221 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10222 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10225 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10228 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10231 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10233 /* Now call the actual ctor */
10234 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10235 CHECK_CFG_EXCEPTION;
10238 if (alloc == NULL) {
10240 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10241 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10249 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10250 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10253 case CEE_CASTCLASS:
10258 token = read32 (ip + 1);
10259 klass = mini_get_class (method, token, generic_context);
10260 CHECK_TYPELOAD (klass);
10261 if (sp [0]->type != STACK_OBJ)
10264 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10265 ins->dreg = alloc_preg (cfg);
10266 ins->sreg1 = (*sp)->dreg;
10267 ins->klass = klass;
10268 ins->type = STACK_OBJ;
10269 MONO_ADD_INS (cfg->cbb, ins);
10271 CHECK_CFG_EXCEPTION;
10275 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10278 case CEE_UNBOX_ANY: {
10279 MonoInst *res, *addr;
10284 token = read32 (ip + 1);
10285 klass = mini_get_class (method, token, generic_context);
10286 CHECK_TYPELOAD (klass);
10288 mono_save_token_info (cfg, image, token, klass);
10290 context_used = mini_class_check_context_used (cfg, klass);
10292 if (mini_is_gsharedvt_klass (klass)) {
10293 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10295 } else if (generic_class_is_reference_type (cfg, klass)) {
10296 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10297 EMIT_NEW_PCONST (cfg, res, NULL);
10298 res->type = STACK_OBJ;
10300 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10301 res->dreg = alloc_preg (cfg);
10302 res->sreg1 = (*sp)->dreg;
10303 res->klass = klass;
10304 res->type = STACK_OBJ;
10305 MONO_ADD_INS (cfg->cbb, res);
10306 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10308 } else if (mono_class_is_nullable (klass)) {
10309 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10311 addr = handle_unbox (cfg, klass, sp, context_used);
10313 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10324 MonoClass *enum_class;
10325 MonoMethod *has_flag;
10331 token = read32 (ip + 1);
10332 klass = mini_get_class (method, token, generic_context);
10333 CHECK_TYPELOAD (klass);
10335 mono_save_token_info (cfg, image, token, klass);
10337 context_used = mini_class_check_context_used (cfg, klass);
10339 if (generic_class_is_reference_type (cfg, klass)) {
10345 if (klass == mono_defaults.void_class)
10347 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10349 /* frequent check in generic code: box (struct), brtrue */
10354 * <push int/long ptr>
10357 * constrained. MyFlags
10358 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10360 * If we find this sequence and the operand types on box and constrained
10361 * are equal, we can emit a specialized instruction sequence instead of
10362 * the very slow HasFlag () call.
10364 if ((cfg->opt & MONO_OPT_INTRINS) &&
10365 /* Cheap checks first. */
10366 ip + 5 + 6 + 5 < end &&
10367 ip [5] == CEE_PREFIX1 &&
10368 ip [6] == CEE_CONSTRAINED_ &&
10369 ip [11] == CEE_CALLVIRT &&
10370 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10371 mono_class_is_enum (klass) &&
10372 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10373 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10374 has_flag->klass == mono_defaults.enum_class &&
10375 !strcmp (has_flag->name, "HasFlag") &&
10376 has_flag->signature->hasthis &&
10377 has_flag->signature->param_count == 1) {
10378 CHECK_TYPELOAD (enum_class);
10380 if (enum_class == klass) {
10381 MonoInst *enum_this, *enum_flag;
10386 enum_this = sp [0];
10387 enum_flag = sp [1];
10389 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10394 // FIXME: LLVM can't handle the inconsistent bb linking
10395 if (!mono_class_is_nullable (klass) &&
10396 !mini_is_gsharedvt_klass (klass) &&
10397 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10398 (ip [5] == CEE_BRTRUE ||
10399 ip [5] == CEE_BRTRUE_S ||
10400 ip [5] == CEE_BRFALSE ||
10401 ip [5] == CEE_BRFALSE_S)) {
10402 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10404 MonoBasicBlock *true_bb, *false_bb;
10408 if (cfg->verbose_level > 3) {
10409 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10410 printf ("<box+brtrue opt>\n");
10415 case CEE_BRFALSE_S:
10418 target = ip + 1 + (signed char)(*ip);
10425 target = ip + 4 + (gint)(read32 (ip));
10429 g_assert_not_reached ();
10433 * We need to link both bblocks, since it is needed for handling stack
10434 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10435 * Branching to only one of them would lead to inconsistencies, so
10436 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10438 GET_BBLOCK (cfg, true_bb, target);
10439 GET_BBLOCK (cfg, false_bb, ip);
10441 mono_link_bblock (cfg, cfg->cbb, true_bb);
10442 mono_link_bblock (cfg, cfg->cbb, false_bb);
10444 if (sp != stack_start) {
10445 handle_stack_args (cfg, stack_start, sp - stack_start);
10447 CHECK_UNVERIFIABLE (cfg);
10450 if (COMPILE_LLVM (cfg)) {
10451 dreg = alloc_ireg (cfg);
10452 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10455 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10457 /* The JIT can't eliminate the iconst+compare */
10458 MONO_INST_NEW (cfg, ins, OP_BR);
10459 ins->inst_target_bb = is_true ? true_bb : false_bb;
10460 MONO_ADD_INS (cfg->cbb, ins);
10463 start_new_bblock = 1;
10467 *sp++ = handle_box (cfg, val, klass, context_used);
10469 CHECK_CFG_EXCEPTION;
10478 token = read32 (ip + 1);
10479 klass = mini_get_class (method, token, generic_context);
10480 CHECK_TYPELOAD (klass);
10482 mono_save_token_info (cfg, image, token, klass);
10484 context_used = mini_class_check_context_used (cfg, klass);
10486 if (mono_class_is_nullable (klass)) {
10489 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10490 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10494 ins = handle_unbox (cfg, klass, sp, context_used);
10507 MonoClassField *field;
10508 #ifndef DISABLE_REMOTING
10512 gboolean is_instance;
10514 gpointer addr = NULL;
10515 gboolean is_special_static;
10517 MonoInst *store_val = NULL;
10518 MonoInst *thread_ins;
10521 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10523 if (op == CEE_STFLD) {
10526 store_val = sp [1];
10531 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10533 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10536 if (op == CEE_STSFLD) {
10539 store_val = sp [0];
10544 token = read32 (ip + 1);
10545 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10546 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10547 klass = field->parent;
10550 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10553 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10554 FIELD_ACCESS_FAILURE (method, field);
10555 mono_class_init (klass);
10557 /* if the class is Critical then transparent code cannot access its fields */
10558 if (!is_instance && mono_security_core_clr_enabled ())
10559 ensure_method_is_allowed_to_access_field (cfg, method, field);
10561 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10562 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10563 if (mono_security_core_clr_enabled ())
10564 ensure_method_is_allowed_to_access_field (cfg, method, field);
10567 ftype = mono_field_get_type (field);
10570 * LDFLD etc. is usable on static fields as well, so convert those cases to
10573 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10585 g_assert_not_reached ();
10587 is_instance = FALSE;
10590 context_used = mini_class_check_context_used (cfg, klass);
10592 /* INSTANCE CASE */
10594 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10595 if (op == CEE_STFLD) {
10596 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10598 #ifndef DISABLE_REMOTING
10599 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10600 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10601 MonoInst *iargs [5];
10603 GSHAREDVT_FAILURE (op);
10605 iargs [0] = sp [0];
10606 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10607 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10608 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10610 iargs [4] = sp [1];
10612 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10613 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10614 iargs, ip, cfg->real_offset, TRUE);
10615 CHECK_CFG_EXCEPTION;
10616 g_assert (costs > 0);
10618 cfg->real_offset += 5;
10620 inline_costs += costs;
10622 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10627 MonoInst *store, *wbarrier_ptr_ins = NULL;
10629 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10631 if (ins_flag & MONO_INST_VOLATILE) {
10632 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10633 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10636 if (mini_is_gsharedvt_klass (klass)) {
10637 MonoInst *offset_ins;
10639 context_used = mini_class_check_context_used (cfg, klass);
10641 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10642 /* The value is offset by 1 */
10643 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10644 dreg = alloc_ireg_mp (cfg);
10645 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10646 wbarrier_ptr_ins = ins;
10647 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10648 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10650 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10652 if (sp [0]->opcode != OP_LDADDR)
10653 store->flags |= MONO_INST_FAULT;
10655 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10656 if (mini_is_gsharedvt_klass (klass)) {
10657 g_assert (wbarrier_ptr_ins);
10658 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10660 /* insert call to write barrier */
10664 dreg = alloc_ireg_mp (cfg);
10665 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10666 emit_write_barrier (cfg, ptr, sp [1]);
10670 store->flags |= ins_flag;
10677 #ifndef DISABLE_REMOTING
10678 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10679 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10680 MonoInst *iargs [4];
10682 GSHAREDVT_FAILURE (op);
10684 iargs [0] = sp [0];
10685 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10686 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10687 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10688 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10689 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10690 iargs, ip, cfg->real_offset, TRUE);
10691 CHECK_CFG_EXCEPTION;
10692 g_assert (costs > 0);
10694 cfg->real_offset += 5;
10698 inline_costs += costs;
10700 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10706 if (sp [0]->type == STACK_VTYPE) {
10709 /* Have to compute the address of the variable */
10711 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10713 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10715 g_assert (var->klass == klass);
10717 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10721 if (op == CEE_LDFLDA) {
10722 if (sp [0]->type == STACK_OBJ) {
10723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10724 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10727 dreg = alloc_ireg_mp (cfg);
10729 if (mini_is_gsharedvt_klass (klass)) {
10730 MonoInst *offset_ins;
10732 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10733 /* The value is offset by 1 */
10734 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10735 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10737 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10739 ins->klass = mono_class_from_mono_type (field->type);
10740 ins->type = STACK_MP;
10745 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10747 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10748 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10757 if (mini_is_gsharedvt_klass (klass)) {
10758 MonoInst *offset_ins;
10760 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10761 /* The value is offset by 1 */
10762 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10763 dreg = alloc_ireg_mp (cfg);
10764 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10765 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10769 load->flags |= ins_flag;
10770 if (sp [0]->opcode != OP_LDADDR)
10771 load->flags |= MONO_INST_FAULT;
10783 context_used = mini_class_check_context_used (cfg, klass);
10785 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10786 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10790 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10791 * to be called here.
10793 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10794 mono_class_vtable (cfg->domain, klass);
10795 CHECK_TYPELOAD (klass);
10797 mono_domain_lock (cfg->domain);
10798 if (cfg->domain->special_static_fields)
10799 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10800 mono_domain_unlock (cfg->domain);
10802 is_special_static = mono_class_field_is_special_static (field);
10804 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10805 thread_ins = mono_get_thread_intrinsic (cfg);
10809 /* Generate IR to compute the field address */
10810 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10812 * Fast access to TLS data
10813 * Inline version of get_thread_static_data () in
10817 int idx, static_data_reg, array_reg, dreg;
10819 GSHAREDVT_FAILURE (op);
10821 MONO_ADD_INS (cfg->cbb, thread_ins);
10822 static_data_reg = alloc_ireg (cfg);
10823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10825 if (cfg->compile_aot) {
10826 int offset_reg, offset2_reg, idx_reg;
10828 /* For TLS variables, this will return the TLS offset */
10829 EMIT_NEW_SFLDACONST (cfg, ins, field);
10830 offset_reg = ins->dreg;
10831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10832 idx_reg = alloc_ireg (cfg);
10833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10835 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10836 array_reg = alloc_ireg (cfg);
10837 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10838 offset2_reg = alloc_ireg (cfg);
10839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10841 dreg = alloc_ireg (cfg);
10842 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10844 offset = (gsize)addr & 0x7fffffff;
10845 idx = offset & 0x3f;
10847 array_reg = alloc_ireg (cfg);
10848 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10849 dreg = alloc_ireg (cfg);
10850 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10852 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10853 (cfg->compile_aot && is_special_static) ||
10854 (context_used && is_special_static)) {
10855 MonoInst *iargs [2];
10857 g_assert (field->parent);
10858 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10859 if (context_used) {
10860 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10861 field, MONO_RGCTX_INFO_CLASS_FIELD);
10863 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10865 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10866 } else if (context_used) {
10867 MonoInst *static_data;
10870 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10871 method->klass->name_space, method->klass->name, method->name,
10872 depth, field->offset);
10875 if (mono_class_needs_cctor_run (klass, method))
10876 emit_class_init (cfg, klass);
10879 * The pointer we're computing here is
10881 * super_info.static_data + field->offset
10883 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10884 klass, MONO_RGCTX_INFO_STATIC_DATA);
10886 if (mini_is_gsharedvt_klass (klass)) {
10887 MonoInst *offset_ins;
10889 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10890 /* The value is offset by 1 */
10891 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10892 dreg = alloc_ireg_mp (cfg);
10893 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10894 } else if (field->offset == 0) {
10897 int addr_reg = mono_alloc_preg (cfg);
10898 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10900 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10901 MonoInst *iargs [2];
10903 g_assert (field->parent);
10904 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10905 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10906 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10908 MonoVTable *vtable = NULL;
10910 if (!cfg->compile_aot)
10911 vtable = mono_class_vtable (cfg->domain, klass);
10912 CHECK_TYPELOAD (klass);
10915 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10916 if (!(g_slist_find (class_inits, klass))) {
10917 emit_class_init (cfg, klass);
10918 if (cfg->verbose_level > 2)
10919 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10920 class_inits = g_slist_prepend (class_inits, klass);
10923 if (cfg->run_cctors) {
10924 /* This makes it so that inlining cannot trigger */
10925 /* .cctors: too many apps depend on them */
10926 /* running with a specific order... */
10928 if (! vtable->initialized)
10929 INLINE_FAILURE ("class init");
10930 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10931 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10932 goto exception_exit;
10936 if (cfg->compile_aot)
10937 EMIT_NEW_SFLDACONST (cfg, ins, field);
10940 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10942 EMIT_NEW_PCONST (cfg, ins, addr);
10945 MonoInst *iargs [1];
10946 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10947 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10951 /* Generate IR to do the actual load/store operation */
10953 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10954 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10955 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10958 if (op == CEE_LDSFLDA) {
10959 ins->klass = mono_class_from_mono_type (ftype);
10960 ins->type = STACK_PTR;
10962 } else if (op == CEE_STSFLD) {
10965 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10966 store->flags |= ins_flag;
10968 gboolean is_const = FALSE;
10969 MonoVTable *vtable = NULL;
10970 gpointer addr = NULL;
10972 if (!context_used) {
10973 vtable = mono_class_vtable (cfg->domain, klass);
10974 CHECK_TYPELOAD (klass);
10976 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10977 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10978 int ro_type = ftype->type;
10980 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10981 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10982 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10985 GSHAREDVT_FAILURE (op);
10987 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10990 case MONO_TYPE_BOOLEAN:
10992 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10996 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10999 case MONO_TYPE_CHAR:
11001 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11005 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11010 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11014 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11019 case MONO_TYPE_PTR:
11020 case MONO_TYPE_FNPTR:
11021 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11022 type_to_eval_stack_type ((cfg), field->type, *sp);
11025 case MONO_TYPE_STRING:
11026 case MONO_TYPE_OBJECT:
11027 case MONO_TYPE_CLASS:
11028 case MONO_TYPE_SZARRAY:
11029 case MONO_TYPE_ARRAY:
11030 if (!mono_gc_is_moving ()) {
11031 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11032 type_to_eval_stack_type ((cfg), field->type, *sp);
11040 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11045 case MONO_TYPE_VALUETYPE:
11055 CHECK_STACK_OVF (1);
11057 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11058 load->flags |= ins_flag;
11064 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11065 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11066 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11077 token = read32 (ip + 1);
11078 klass = mini_get_class (method, token, generic_context);
11079 CHECK_TYPELOAD (klass);
11080 if (ins_flag & MONO_INST_VOLATILE) {
11081 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11082 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11084 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11085 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11086 ins->flags |= ins_flag;
11087 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11088 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11089 /* insert call to write barrier */
11090 emit_write_barrier (cfg, sp [0], sp [1]);
11102 const char *data_ptr;
11104 guint32 field_token;
11110 token = read32 (ip + 1);
11112 klass = mini_get_class (method, token, generic_context);
11113 CHECK_TYPELOAD (klass);
11115 context_used = mini_class_check_context_used (cfg, klass);
11117 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11118 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11119 ins->sreg1 = sp [0]->dreg;
11120 ins->type = STACK_I4;
11121 ins->dreg = alloc_ireg (cfg);
11122 MONO_ADD_INS (cfg->cbb, ins);
11123 *sp = mono_decompose_opcode (cfg, ins);
11126 if (context_used) {
11127 MonoInst *args [3];
11128 MonoClass *array_class = mono_array_class_get (klass, 1);
11129 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11131 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11134 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11135 array_class, MONO_RGCTX_INFO_VTABLE);
11140 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11142 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11144 if (cfg->opt & MONO_OPT_SHARED) {
11145 /* Decompose now to avoid problems with references to the domainvar */
11146 MonoInst *iargs [3];
11148 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11149 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11150 iargs [2] = sp [0];
11152 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11154 /* Decompose later since it is needed by abcrem */
11155 MonoClass *array_type = mono_array_class_get (klass, 1);
11156 mono_class_vtable (cfg->domain, array_type);
11157 CHECK_TYPELOAD (array_type);
11159 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11160 ins->dreg = alloc_ireg_ref (cfg);
11161 ins->sreg1 = sp [0]->dreg;
11162 ins->inst_newa_class = klass;
11163 ins->type = STACK_OBJ;
11164 ins->klass = array_type;
11165 MONO_ADD_INS (cfg->cbb, ins);
11166 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11167 cfg->cbb->has_array_access = TRUE;
11169 /* Needed so mono_emit_load_get_addr () gets called */
11170 mono_get_got_var (cfg);
11180 * we inline/optimize the initialization sequence if possible.
11181 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11182 * for small sizes open code the memcpy
11183 * ensure the rva field is big enough
11185 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11186 MonoMethod *memcpy_method = get_memcpy_method ();
11187 MonoInst *iargs [3];
11188 int add_reg = alloc_ireg_mp (cfg);
11190 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11191 if (cfg->compile_aot) {
11192 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11194 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11196 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11197 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11206 if (sp [0]->type != STACK_OBJ)
11209 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11210 ins->dreg = alloc_preg (cfg);
11211 ins->sreg1 = sp [0]->dreg;
11212 ins->type = STACK_I4;
11213 /* This flag will be inherited by the decomposition */
11214 ins->flags |= MONO_INST_FAULT;
11215 MONO_ADD_INS (cfg->cbb, ins);
11216 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11217 cfg->cbb->has_array_access = TRUE;
11225 if (sp [0]->type != STACK_OBJ)
11228 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11230 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11231 CHECK_TYPELOAD (klass);
11232 /* we need to make sure that this array is exactly the type it needs
11233 * to be for correctness. the wrappers are lax with their usage
11234 * so we need to ignore them here
11236 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11237 MonoClass *array_class = mono_array_class_get (klass, 1);
11238 mini_emit_check_array_type (cfg, sp [0], array_class);
11239 CHECK_TYPELOAD (array_class);
11243 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11248 case CEE_LDELEM_I1:
11249 case CEE_LDELEM_U1:
11250 case CEE_LDELEM_I2:
11251 case CEE_LDELEM_U2:
11252 case CEE_LDELEM_I4:
11253 case CEE_LDELEM_U4:
11254 case CEE_LDELEM_I8:
11256 case CEE_LDELEM_R4:
11257 case CEE_LDELEM_R8:
11258 case CEE_LDELEM_REF: {
11264 if (*ip == CEE_LDELEM) {
11266 token = read32 (ip + 1);
11267 klass = mini_get_class (method, token, generic_context);
11268 CHECK_TYPELOAD (klass);
11269 mono_class_init (klass);
11272 klass = array_access_to_klass (*ip);
11274 if (sp [0]->type != STACK_OBJ)
11277 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11279 if (mini_is_gsharedvt_variable_klass (klass)) {
11280 // FIXME-VT: OP_ICONST optimization
11281 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11282 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11283 ins->opcode = OP_LOADV_MEMBASE;
11284 } else if (sp [1]->opcode == OP_ICONST) {
11285 int array_reg = sp [0]->dreg;
11286 int index_reg = sp [1]->dreg;
11287 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11289 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11290 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11292 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11293 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11295 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11296 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11299 if (*ip == CEE_LDELEM)
11306 case CEE_STELEM_I1:
11307 case CEE_STELEM_I2:
11308 case CEE_STELEM_I4:
11309 case CEE_STELEM_I8:
11310 case CEE_STELEM_R4:
11311 case CEE_STELEM_R8:
11312 case CEE_STELEM_REF:
11317 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11319 if (*ip == CEE_STELEM) {
11321 token = read32 (ip + 1);
11322 klass = mini_get_class (method, token, generic_context);
11323 CHECK_TYPELOAD (klass);
11324 mono_class_init (klass);
11327 klass = array_access_to_klass (*ip);
11329 if (sp [0]->type != STACK_OBJ)
11332 emit_array_store (cfg, klass, sp, TRUE);
11334 if (*ip == CEE_STELEM)
11341 case CEE_CKFINITE: {
11345 if (cfg->llvm_only) {
11346 MonoInst *iargs [1];
11348 iargs [0] = sp [0];
11349 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11351 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11352 ins->sreg1 = sp [0]->dreg;
11353 ins->dreg = alloc_freg (cfg);
11354 ins->type = STACK_R8;
11355 MONO_ADD_INS (cfg->cbb, ins);
11357 *sp++ = mono_decompose_opcode (cfg, ins);
11363 case CEE_REFANYVAL: {
11364 MonoInst *src_var, *src;
11366 int klass_reg = alloc_preg (cfg);
11367 int dreg = alloc_preg (cfg);
11369 GSHAREDVT_FAILURE (*ip);
11372 MONO_INST_NEW (cfg, ins, *ip);
11375 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11376 CHECK_TYPELOAD (klass);
11378 context_used = mini_class_check_context_used (cfg, klass);
11381 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11383 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11384 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11387 if (context_used) {
11388 MonoInst *klass_ins;
11390 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11391 klass, MONO_RGCTX_INFO_KLASS);
11394 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11395 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11397 mini_emit_class_check (cfg, klass_reg, klass);
11399 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11400 ins->type = STACK_MP;
11401 ins->klass = klass;
11406 case CEE_MKREFANY: {
11407 MonoInst *loc, *addr;
11409 GSHAREDVT_FAILURE (*ip);
11412 MONO_INST_NEW (cfg, ins, *ip);
11415 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11416 CHECK_TYPELOAD (klass);
11418 context_used = mini_class_check_context_used (cfg, klass);
11420 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11421 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11423 if (context_used) {
11424 MonoInst *const_ins;
11425 int type_reg = alloc_preg (cfg);
11427 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11428 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11430 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11432 int const_reg = alloc_preg (cfg);
11433 int type_reg = alloc_preg (cfg);
11435 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11436 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11438 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11440 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11442 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11443 ins->type = STACK_VTYPE;
11444 ins->klass = mono_defaults.typed_reference_class;
11449 case CEE_LDTOKEN: {
11451 MonoClass *handle_class;
11453 CHECK_STACK_OVF (1);
11456 n = read32 (ip + 1);
11458 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11459 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11460 handle = mono_method_get_wrapper_data (method, n);
11461 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11462 if (handle_class == mono_defaults.typehandle_class)
11463 handle = &((MonoClass*)handle)->byval_arg;
11466 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11471 mono_class_init (handle_class);
11472 if (cfg->gshared) {
11473 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11474 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11475 /* This case handles ldtoken
11476 of an open type, like for
11479 } else if (handle_class == mono_defaults.typehandle_class) {
11480 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11481 } else if (handle_class == mono_defaults.fieldhandle_class)
11482 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11483 else if (handle_class == mono_defaults.methodhandle_class)
11484 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11486 g_assert_not_reached ();
11489 if ((cfg->opt & MONO_OPT_SHARED) &&
11490 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11491 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11492 MonoInst *addr, *vtvar, *iargs [3];
11493 int method_context_used;
11495 method_context_used = mini_method_check_context_used (cfg, method);
11497 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11499 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11500 EMIT_NEW_ICONST (cfg, iargs [1], n);
11501 if (method_context_used) {
11502 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11503 method, MONO_RGCTX_INFO_METHOD);
11504 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11506 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11507 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11509 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11513 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11515 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11516 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11517 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11518 (cmethod->klass == mono_defaults.systemtype_class) &&
11519 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11520 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11522 mono_class_init (tclass);
11523 if (context_used) {
11524 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11525 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11526 } else if (cfg->compile_aot) {
11527 if (method->wrapper_type) {
11528 mono_error_init (&error); //got to do it since there are multiple conditionals below
11529 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11530 /* Special case for static synchronized wrappers */
11531 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11533 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11534 /* FIXME: n is not a normal token */
11536 EMIT_NEW_PCONST (cfg, ins, NULL);
11539 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11542 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11544 EMIT_NEW_PCONST (cfg, ins, rt);
11546 ins->type = STACK_OBJ;
11547 ins->klass = cmethod->klass;
11550 MonoInst *addr, *vtvar;
11552 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11554 if (context_used) {
11555 if (handle_class == mono_defaults.typehandle_class) {
11556 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11557 mono_class_from_mono_type ((MonoType *)handle),
11558 MONO_RGCTX_INFO_TYPE);
11559 } else if (handle_class == mono_defaults.methodhandle_class) {
11560 ins = emit_get_rgctx_method (cfg, context_used,
11561 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11562 } else if (handle_class == mono_defaults.fieldhandle_class) {
11563 ins = emit_get_rgctx_field (cfg, context_used,
11564 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11566 g_assert_not_reached ();
11568 } else if (cfg->compile_aot) {
11569 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11571 EMIT_NEW_PCONST (cfg, ins, handle);
11573 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11574 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11575 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11585 if (sp [-1]->type != STACK_OBJ)
11588 MONO_INST_NEW (cfg, ins, OP_THROW);
11590 ins->sreg1 = sp [0]->dreg;
11592 cfg->cbb->out_of_line = TRUE;
11593 MONO_ADD_INS (cfg->cbb, ins);
11594 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11595 MONO_ADD_INS (cfg->cbb, ins);
11598 link_bblock (cfg, cfg->cbb, end_bblock);
11599 start_new_bblock = 1;
11600 /* This can complicate code generation for llvm since the return value might not be defined */
11601 if (COMPILE_LLVM (cfg))
11602 INLINE_FAILURE ("throw");
11604 case CEE_ENDFINALLY:
11605 if (!ip_in_finally_clause (cfg, ip - header->code))
11607 /* mono_save_seq_point_info () depends on this */
11608 if (sp != stack_start)
11609 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11610 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11611 MONO_ADD_INS (cfg->cbb, ins);
11613 start_new_bblock = 1;
11616 * Control will leave the method so empty the stack, otherwise
11617 * the next basic block will start with a nonempty stack.
11619 while (sp != stack_start) {
11624 case CEE_LEAVE_S: {
11627 if (*ip == CEE_LEAVE) {
11629 target = ip + 5 + (gint32)read32(ip + 1);
11632 target = ip + 2 + (signed char)(ip [1]);
11635 /* empty the stack */
11636 while (sp != stack_start) {
11641 * If this leave statement is in a catch block, check for a
11642 * pending exception, and rethrow it if necessary.
11643 * We avoid doing this in runtime invoke wrappers, since those are called
11644 * by native code which expects the wrapper to catch all exceptions.
11646 for (i = 0; i < header->num_clauses; ++i) {
11647 MonoExceptionClause *clause = &header->clauses [i];
11650 * Use <= in the final comparison to handle clauses with multiple
11651 * leave statements, like in bug #78024.
11652 * The ordering of the exception clauses guarantees that we find the
11653 * innermost clause.
11655 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11657 MonoBasicBlock *dont_throw;
11662 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11665 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11667 NEW_BBLOCK (cfg, dont_throw);
11670 * Currently, we always rethrow the abort exception, despite the
11671 * fact that this is not correct. See thread6.cs for an example.
11672 * But propagating the abort exception is more important than
11673 * getting the semantics right.
11675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11677 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11679 MONO_START_BB (cfg, dont_throw);
11684 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11687 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11689 MonoExceptionClause *clause;
11691 for (tmp = handlers; tmp; tmp = tmp->next) {
11692 clause = (MonoExceptionClause *)tmp->data;
11693 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11695 link_bblock (cfg, cfg->cbb, tblock);
11696 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11697 ins->inst_target_bb = tblock;
11698 ins->inst_eh_block = clause;
11699 MONO_ADD_INS (cfg->cbb, ins);
11700 cfg->cbb->has_call_handler = 1;
11701 if (COMPILE_LLVM (cfg)) {
11702 MonoBasicBlock *target_bb;
11705 * Link the finally bblock with the target, since it will
11706 * conceptually branch there.
11708 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11709 GET_BBLOCK (cfg, target_bb, target);
11710 link_bblock (cfg, tblock, target_bb);
11713 g_list_free (handlers);
11716 MONO_INST_NEW (cfg, ins, OP_BR);
11717 MONO_ADD_INS (cfg->cbb, ins);
11718 GET_BBLOCK (cfg, tblock, target);
11719 link_bblock (cfg, cfg->cbb, tblock);
11720 ins->inst_target_bb = tblock;
11722 start_new_bblock = 1;
11724 if (*ip == CEE_LEAVE)
11733 * Mono specific opcodes
11735 case MONO_CUSTOM_PREFIX: {
11737 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11741 case CEE_MONO_ICALL: {
11743 MonoJitICallInfo *info;
11745 token = read32 (ip + 2);
11746 func = mono_method_get_wrapper_data (method, token);
11747 info = mono_find_jit_icall_by_addr (func);
11749 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11752 CHECK_STACK (info->sig->param_count);
11753 sp -= info->sig->param_count;
11755 ins = mono_emit_jit_icall (cfg, info->func, sp);
11756 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11760 inline_costs += 10 * num_calls++;
11764 case CEE_MONO_LDPTR_CARD_TABLE:
11765 case CEE_MONO_LDPTR_NURSERY_START:
11766 case CEE_MONO_LDPTR_NURSERY_BITS:
11767 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11768 CHECK_STACK_OVF (1);
11771 case CEE_MONO_LDPTR_CARD_TABLE:
11772 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11774 case CEE_MONO_LDPTR_NURSERY_START:
11775 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11777 case CEE_MONO_LDPTR_NURSERY_BITS:
11778 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11780 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11781 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11787 inline_costs += 10 * num_calls++;
11790 case CEE_MONO_LDPTR: {
11793 CHECK_STACK_OVF (1);
11795 token = read32 (ip + 2);
11797 ptr = mono_method_get_wrapper_data (method, token);
11798 EMIT_NEW_PCONST (cfg, ins, ptr);
11801 inline_costs += 10 * num_calls++;
11802 /* Can't embed random pointers into AOT code */
11806 case CEE_MONO_JIT_ICALL_ADDR: {
11807 MonoJitICallInfo *callinfo;
11810 CHECK_STACK_OVF (1);
11812 token = read32 (ip + 2);
11814 ptr = mono_method_get_wrapper_data (method, token);
11815 callinfo = mono_find_jit_icall_by_addr (ptr);
11816 g_assert (callinfo);
11817 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11820 inline_costs += 10 * num_calls++;
11823 case CEE_MONO_ICALL_ADDR: {
11824 MonoMethod *cmethod;
11827 CHECK_STACK_OVF (1);
11829 token = read32 (ip + 2);
11831 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11833 if (cfg->compile_aot) {
11834 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11836 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11837 * before the call, its not needed when using direct pinvoke.
11838 * This is not an optimization, but its used to avoid looking up pinvokes
11839 * on platforms which don't support dlopen ().
11841 EMIT_NEW_PCONST (cfg, ins, NULL);
11843 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11846 ptr = mono_lookup_internal_call (cmethod);
11848 EMIT_NEW_PCONST (cfg, ins, ptr);
11854 case CEE_MONO_VTADDR: {
11855 MonoInst *src_var, *src;
11861 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11862 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11867 case CEE_MONO_NEWOBJ: {
11868 MonoInst *iargs [2];
11870 CHECK_STACK_OVF (1);
11872 token = read32 (ip + 2);
11873 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11874 mono_class_init (klass);
11875 NEW_DOMAINCONST (cfg, iargs [0]);
11876 MONO_ADD_INS (cfg->cbb, iargs [0]);
11877 NEW_CLASSCONST (cfg, iargs [1], klass);
11878 MONO_ADD_INS (cfg->cbb, iargs [1]);
11879 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11881 inline_costs += 10 * num_calls++;
11884 case CEE_MONO_OBJADDR:
11887 MONO_INST_NEW (cfg, ins, OP_MOVE);
11888 ins->dreg = alloc_ireg_mp (cfg);
11889 ins->sreg1 = sp [0]->dreg;
11890 ins->type = STACK_MP;
11891 MONO_ADD_INS (cfg->cbb, ins);
11895 case CEE_MONO_LDNATIVEOBJ:
11897 * Similar to LDOBJ, but instead load the unmanaged
11898 * representation of the vtype to the stack.
11903 token = read32 (ip + 2);
11904 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11905 g_assert (klass->valuetype);
11906 mono_class_init (klass);
11909 MonoInst *src, *dest, *temp;
11912 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11913 temp->backend.is_pinvoke = 1;
11914 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11915 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11917 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11918 dest->type = STACK_VTYPE;
11919 dest->klass = klass;
11925 case CEE_MONO_RETOBJ: {
11927 * Same as RET, but return the native representation of a vtype
11930 g_assert (cfg->ret);
11931 g_assert (mono_method_signature (method)->pinvoke);
11936 token = read32 (ip + 2);
11937 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11939 if (!cfg->vret_addr) {
11940 g_assert (cfg->ret_var_is_local);
11942 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11944 EMIT_NEW_RETLOADA (cfg, ins);
11946 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11948 if (sp != stack_start)
11951 MONO_INST_NEW (cfg, ins, OP_BR);
11952 ins->inst_target_bb = end_bblock;
11953 MONO_ADD_INS (cfg->cbb, ins);
11954 link_bblock (cfg, cfg->cbb, end_bblock);
11955 start_new_bblock = 1;
11959 case CEE_MONO_SAVE_LMF:
11960 case CEE_MONO_RESTORE_LMF:
11963 case CEE_MONO_CLASSCONST:
11964 CHECK_STACK_OVF (1);
11966 token = read32 (ip + 2);
11967 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11970 inline_costs += 10 * num_calls++;
11972 case CEE_MONO_NOT_TAKEN:
11973 cfg->cbb->out_of_line = TRUE;
11976 case CEE_MONO_TLS: {
11979 CHECK_STACK_OVF (1);
11981 key = (MonoTlsKey)read32 (ip + 2);
11982 g_assert (key < TLS_KEY_NUM);
11984 ins = mono_create_tls_get (cfg, key);
11986 if (cfg->compile_aot) {
11988 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11989 ins->dreg = alloc_preg (cfg);
11990 ins->type = STACK_PTR;
11992 g_assert_not_reached ();
11995 ins->type = STACK_PTR;
11996 MONO_ADD_INS (cfg->cbb, ins);
12001 case CEE_MONO_DYN_CALL: {
12002 MonoCallInst *call;
12004 /* It would be easier to call a trampoline, but that would put an
12005 * extra frame on the stack, confusing exception handling. So
12006 * implement it inline using an opcode for now.
12009 if (!cfg->dyn_call_var) {
12010 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12011 /* prevent it from being register allocated */
12012 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12015 /* Has to use a call inst since it local regalloc expects it */
12016 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12017 ins = (MonoInst*)call;
12019 ins->sreg1 = sp [0]->dreg;
12020 ins->sreg2 = sp [1]->dreg;
12021 MONO_ADD_INS (cfg->cbb, ins);
12023 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12026 inline_costs += 10 * num_calls++;
12030 case CEE_MONO_MEMORY_BARRIER: {
12032 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12036 case CEE_MONO_ATOMIC_STORE_I4: {
12037 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12043 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12044 ins->dreg = sp [0]->dreg;
12045 ins->sreg1 = sp [1]->dreg;
12046 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12047 MONO_ADD_INS (cfg->cbb, ins);
12052 case CEE_MONO_JIT_ATTACH: {
12053 MonoInst *args [16], *domain_ins;
12054 MonoInst *ad_ins, *jit_tls_ins;
12055 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12057 g_assert (!mono_threads_is_coop_enabled ());
12059 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12061 EMIT_NEW_PCONST (cfg, ins, NULL);
12062 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12064 ad_ins = mono_get_domain_intrinsic (cfg);
12065 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12067 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12068 NEW_BBLOCK (cfg, next_bb);
12069 NEW_BBLOCK (cfg, call_bb);
12071 if (cfg->compile_aot) {
12072 /* AOT code is only used in the root domain */
12073 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12075 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12077 MONO_ADD_INS (cfg->cbb, ad_ins);
12078 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12081 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12083 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12085 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12086 MONO_START_BB (cfg, call_bb);
12089 /* AOT code is only used in the root domain */
12090 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12091 if (cfg->compile_aot) {
12095 * This is called on unattached threads, so it cannot go through the trampoline
12096 * infrastructure. Use an indirect call through a got slot initialized at load time
12099 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12100 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12102 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12104 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12107 MONO_START_BB (cfg, next_bb);
12112 case CEE_MONO_JIT_DETACH: {
12113 MonoInst *args [16];
12115 /* Restore the original domain */
12116 dreg = alloc_ireg (cfg);
12117 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12118 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12122 case CEE_MONO_CALLI_EXTRA_ARG: {
12124 MonoMethodSignature *fsig;
12128 * This is the same as CEE_CALLI, but passes an additional argument
12129 * to the called method in llvmonly mode.
12130 * This is only used by delegate invoke wrappers to call the
12131 * actual delegate method.
12133 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12136 token = read32 (ip + 2);
12144 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12147 if (cfg->llvm_only)
12148 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12150 n = fsig->param_count + fsig->hasthis + 1;
12157 if (cfg->llvm_only) {
12159 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12160 * cconv. This is set by mono_init_delegate ().
12162 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12163 MonoInst *callee = addr;
12164 MonoInst *call, *localloc_ins;
12165 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12166 int low_bit_reg = alloc_preg (cfg);
12168 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12169 NEW_BBLOCK (cfg, end_bb);
12171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12172 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12175 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12176 addr = emit_get_rgctx_sig (cfg, context_used,
12177 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12179 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12181 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12182 ins->dreg = alloc_preg (cfg);
12183 ins->inst_imm = 2 * SIZEOF_VOID_P;
12184 MONO_ADD_INS (cfg->cbb, ins);
12185 localloc_ins = ins;
12186 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12187 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12188 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12190 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12193 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12194 MONO_START_BB (cfg, is_gsharedvt_bb);
12195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12196 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12197 ins->dreg = call->dreg;
12199 MONO_START_BB (cfg, end_bb);
12201 /* Caller uses a normal calling conv */
12203 MonoInst *callee = addr;
12204 MonoInst *call, *localloc_ins;
12205 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12206 int low_bit_reg = alloc_preg (cfg);
12208 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12209 NEW_BBLOCK (cfg, end_bb);
12211 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12212 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12213 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12215 /* Normal case: callee uses a normal cconv, no conversion is needed */
12216 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12217 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12218 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12219 MONO_START_BB (cfg, is_gsharedvt_bb);
12220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12221 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12222 MONO_ADD_INS (cfg->cbb, addr);
12224 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12226 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12227 ins->dreg = alloc_preg (cfg);
12228 ins->inst_imm = 2 * SIZEOF_VOID_P;
12229 MONO_ADD_INS (cfg->cbb, ins);
12230 localloc_ins = ins;
12231 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12232 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12233 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12235 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12236 ins->dreg = call->dreg;
12237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12239 MONO_START_BB (cfg, end_bb);
12242 /* Same as CEE_CALLI */
12243 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12245 * We pass the address to the gsharedvt trampoline in the rgctx reg
12247 MonoInst *callee = addr;
12249 addr = emit_get_rgctx_sig (cfg, context_used,
12250 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12251 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12253 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12257 if (!MONO_TYPE_IS_VOID (fsig->ret))
12258 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12260 CHECK_CFG_EXCEPTION;
12264 constrained_class = NULL;
12267 case CEE_MONO_LDDOMAIN:
12268 CHECK_STACK_OVF (1);
12269 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12273 case CEE_MONO_GET_LAST_ERROR:
12275 CHECK_STACK_OVF (1);
12277 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12278 ins->dreg = alloc_dreg (cfg, STACK_I4);
12279 ins->type = STACK_I4;
12280 MONO_ADD_INS (cfg->cbb, ins);
12286 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12292 case CEE_PREFIX1: {
12295 case CEE_ARGLIST: {
12296 /* somewhat similar to LDTOKEN */
12297 MonoInst *addr, *vtvar;
12298 CHECK_STACK_OVF (1);
12299 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12301 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12302 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12304 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12305 ins->type = STACK_VTYPE;
12306 ins->klass = mono_defaults.argumenthandle_class;
12316 MonoInst *cmp, *arg1, *arg2;
12324 * The following transforms:
12325 * CEE_CEQ into OP_CEQ
12326 * CEE_CGT into OP_CGT
12327 * CEE_CGT_UN into OP_CGT_UN
12328 * CEE_CLT into OP_CLT
12329 * CEE_CLT_UN into OP_CLT_UN
12331 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12333 MONO_INST_NEW (cfg, ins, cmp->opcode);
12334 cmp->sreg1 = arg1->dreg;
12335 cmp->sreg2 = arg2->dreg;
12336 type_from_op (cfg, cmp, arg1, arg2);
12338 add_widen_op (cfg, cmp, &arg1, &arg2);
12339 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12340 cmp->opcode = OP_LCOMPARE;
12341 else if (arg1->type == STACK_R4)
12342 cmp->opcode = OP_RCOMPARE;
12343 else if (arg1->type == STACK_R8)
12344 cmp->opcode = OP_FCOMPARE;
12346 cmp->opcode = OP_ICOMPARE;
12347 MONO_ADD_INS (cfg->cbb, cmp);
12348 ins->type = STACK_I4;
12349 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12350 type_from_op (cfg, ins, arg1, arg2);
12352 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12354 * The backends expect the fceq opcodes to do the
12357 ins->sreg1 = cmp->sreg1;
12358 ins->sreg2 = cmp->sreg2;
12361 MONO_ADD_INS (cfg->cbb, ins);
12367 MonoInst *argconst;
12368 MonoMethod *cil_method;
12370 CHECK_STACK_OVF (1);
12372 n = read32 (ip + 2);
12373 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12376 mono_class_init (cmethod->klass);
12378 mono_save_token_info (cfg, image, n, cmethod);
12380 context_used = mini_method_check_context_used (cfg, cmethod);
12382 cil_method = cmethod;
12383 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12384 emit_method_access_failure (cfg, method, cil_method);
12386 if (mono_security_core_clr_enabled ())
12387 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12390 * Optimize the common case of ldftn+delegate creation
12392 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12393 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12394 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12395 MonoInst *target_ins, *handle_ins;
12396 MonoMethod *invoke;
12397 int invoke_context_used;
12399 invoke = mono_get_delegate_invoke (ctor_method->klass);
12400 if (!invoke || !mono_method_signature (invoke))
12403 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12405 target_ins = sp [-1];
12407 if (mono_security_core_clr_enabled ())
12408 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12410 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12411 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12412 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12414 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12418 /* FIXME: SGEN support */
12419 if (invoke_context_used == 0 || cfg->llvm_only) {
12421 if (cfg->verbose_level > 3)
12422 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12423 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12426 CHECK_CFG_EXCEPTION;
12436 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12437 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12441 inline_costs += 10 * num_calls++;
12444 case CEE_LDVIRTFTN: {
12445 MonoInst *args [2];
12449 n = read32 (ip + 2);
12450 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12453 mono_class_init (cmethod->klass);
12455 context_used = mini_method_check_context_used (cfg, cmethod);
12457 if (mono_security_core_clr_enabled ())
12458 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12461 * Optimize the common case of ldvirtftn+delegate creation
12463 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12464 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12465 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12466 MonoInst *target_ins, *handle_ins;
12467 MonoMethod *invoke;
12468 int invoke_context_used;
12469 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12471 invoke = mono_get_delegate_invoke (ctor_method->klass);
12472 if (!invoke || !mono_method_signature (invoke))
12475 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12477 target_ins = sp [-1];
12479 if (mono_security_core_clr_enabled ())
12480 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12482 /* FIXME: SGEN support */
12483 if (invoke_context_used == 0 || cfg->llvm_only) {
12485 if (cfg->verbose_level > 3)
12486 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12487 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12490 CHECK_CFG_EXCEPTION;
12503 args [1] = emit_get_rgctx_method (cfg, context_used,
12504 cmethod, MONO_RGCTX_INFO_METHOD);
12507 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12509 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12512 inline_costs += 10 * num_calls++;
12516 CHECK_STACK_OVF (1);
12518 n = read16 (ip + 2);
12520 EMIT_NEW_ARGLOAD (cfg, ins, n);
12525 CHECK_STACK_OVF (1);
12527 n = read16 (ip + 2);
12529 NEW_ARGLOADA (cfg, ins, n);
12530 MONO_ADD_INS (cfg->cbb, ins);
12538 n = read16 (ip + 2);
12540 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12542 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12546 CHECK_STACK_OVF (1);
12548 n = read16 (ip + 2);
12550 EMIT_NEW_LOCLOAD (cfg, ins, n);
12555 unsigned char *tmp_ip;
12556 CHECK_STACK_OVF (1);
12558 n = read16 (ip + 2);
12561 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12567 EMIT_NEW_LOCLOADA (cfg, ins, n);
12576 n = read16 (ip + 2);
12578 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12580 emit_stloc_ir (cfg, sp, header, n);
12584 case CEE_LOCALLOC: {
12586 MonoBasicBlock *non_zero_bb, *end_bb;
12587 int alloc_ptr = alloc_preg (cfg);
12589 if (sp != stack_start)
12591 if (cfg->method != method)
12593 * Inlining this into a loop in a parent could lead to
12594 * stack overflows which is different behavior than the
12595 * non-inlined case, thus disable inlining in this case.
12597 INLINE_FAILURE("localloc");
12599 NEW_BBLOCK (cfg, non_zero_bb);
12600 NEW_BBLOCK (cfg, end_bb);
12602 /* if size != zero */
12603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12606 //size is zero, so result is NULL
12607 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12608 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12610 MONO_START_BB (cfg, non_zero_bb);
12611 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12612 ins->dreg = alloc_ptr;
12613 ins->sreg1 = sp [0]->dreg;
12614 ins->type = STACK_PTR;
12615 MONO_ADD_INS (cfg->cbb, ins);
12617 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12619 ins->flags |= MONO_INST_INIT;
12621 MONO_START_BB (cfg, end_bb);
12622 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12623 ins->type = STACK_PTR;
12629 case CEE_ENDFILTER: {
12630 MonoExceptionClause *clause, *nearest;
12635 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12637 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12638 ins->sreg1 = (*sp)->dreg;
12639 MONO_ADD_INS (cfg->cbb, ins);
12640 start_new_bblock = 1;
12644 for (cc = 0; cc < header->num_clauses; ++cc) {
12645 clause = &header->clauses [cc];
12646 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12647 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12648 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12651 g_assert (nearest);
12652 if ((ip - header->code) != nearest->handler_offset)
12657 case CEE_UNALIGNED_:
12658 ins_flag |= MONO_INST_UNALIGNED;
12659 /* FIXME: record alignment? we can assume 1 for now */
12663 case CEE_VOLATILE_:
12664 ins_flag |= MONO_INST_VOLATILE;
12668 ins_flag |= MONO_INST_TAILCALL;
12669 cfg->flags |= MONO_CFG_HAS_TAIL;
12670 /* Can't inline tail calls at this time */
12671 inline_costs += 100000;
12678 token = read32 (ip + 2);
12679 klass = mini_get_class (method, token, generic_context);
12680 CHECK_TYPELOAD (klass);
12681 if (generic_class_is_reference_type (cfg, klass))
12682 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12684 mini_emit_initobj (cfg, *sp, NULL, klass);
12688 case CEE_CONSTRAINED_:
12690 token = read32 (ip + 2);
12691 constrained_class = mini_get_class (method, token, generic_context);
12692 CHECK_TYPELOAD (constrained_class);
12696 case CEE_INITBLK: {
12697 MonoInst *iargs [3];
12701 /* Skip optimized paths for volatile operations. */
12702 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12703 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12704 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12705 /* emit_memset only works when val == 0 */
12706 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12709 iargs [0] = sp [0];
12710 iargs [1] = sp [1];
12711 iargs [2] = sp [2];
12712 if (ip [1] == CEE_CPBLK) {
12714 * FIXME: It's unclear whether we should be emitting both the acquire
12715 * and release barriers for cpblk. It is technically both a load and
12716 * store operation, so it seems like that's the sensible thing to do.
12718 * FIXME: We emit full barriers on both sides of the operation for
12719 * simplicity. We should have a separate atomic memcpy method instead.
12721 MonoMethod *memcpy_method = get_memcpy_method ();
12723 if (ins_flag & MONO_INST_VOLATILE)
12724 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12726 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12727 call->flags |= ins_flag;
12729 if (ins_flag & MONO_INST_VOLATILE)
12730 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12732 MonoMethod *memset_method = get_memset_method ();
12733 if (ins_flag & MONO_INST_VOLATILE) {
12734 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12735 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12737 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12738 call->flags |= ins_flag;
12749 ins_flag |= MONO_INST_NOTYPECHECK;
12751 ins_flag |= MONO_INST_NORANGECHECK;
12752 /* we ignore the no-nullcheck for now since we
12753 * really do it explicitly only when doing callvirt->call
12757 case CEE_RETHROW: {
12759 int handler_offset = -1;
12761 for (i = 0; i < header->num_clauses; ++i) {
12762 MonoExceptionClause *clause = &header->clauses [i];
12763 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12764 handler_offset = clause->handler_offset;
12769 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12771 if (handler_offset == -1)
12774 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12775 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12776 ins->sreg1 = load->dreg;
12777 MONO_ADD_INS (cfg->cbb, ins);
12779 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12780 MONO_ADD_INS (cfg->cbb, ins);
12783 link_bblock (cfg, cfg->cbb, end_bblock);
12784 start_new_bblock = 1;
12792 CHECK_STACK_OVF (1);
12794 token = read32 (ip + 2);
12795 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12796 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12799 val = mono_type_size (type, &ialign);
12801 MonoClass *klass = mini_get_class (method, token, generic_context);
12802 CHECK_TYPELOAD (klass);
12804 val = mono_type_size (&klass->byval_arg, &ialign);
12806 if (mini_is_gsharedvt_klass (klass))
12807 GSHAREDVT_FAILURE (*ip);
12809 EMIT_NEW_ICONST (cfg, ins, val);
12814 case CEE_REFANYTYPE: {
12815 MonoInst *src_var, *src;
12817 GSHAREDVT_FAILURE (*ip);
12823 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12825 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12826 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12827 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12832 case CEE_READONLY_:
12845 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12855 g_warning ("opcode 0x%02x not handled", *ip);
12859 if (start_new_bblock != 1)
12862 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12863 if (cfg->cbb->next_bb) {
12864 /* This could already be set because of inlining, #693905 */
12865 MonoBasicBlock *bb = cfg->cbb;
12867 while (bb->next_bb)
12869 bb->next_bb = end_bblock;
12871 cfg->cbb->next_bb = end_bblock;
12874 if (cfg->method == method && cfg->domainvar) {
12876 MonoInst *get_domain;
12878 cfg->cbb = init_localsbb;
12880 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12881 MONO_ADD_INS (cfg->cbb, get_domain);
12883 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12885 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12886 MONO_ADD_INS (cfg->cbb, store);
12889 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12890 if (cfg->compile_aot)
12891 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12892 mono_get_got_var (cfg);
12895 if (cfg->method == method && cfg->got_var)
12896 mono_emit_load_got_addr (cfg);
12898 if (init_localsbb) {
12899 cfg->cbb = init_localsbb;
12901 for (i = 0; i < header->num_locals; ++i) {
12902 emit_init_local (cfg, i, header->locals [i], init_locals);
12906 if (cfg->init_ref_vars && cfg->method == method) {
12907 /* Emit initialization for ref vars */
12908 // FIXME: Avoid duplication initialization for IL locals.
12909 for (i = 0; i < cfg->num_varinfo; ++i) {
12910 MonoInst *ins = cfg->varinfo [i];
12912 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12913 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12917 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12918 cfg->cbb = init_localsbb;
12919 emit_push_lmf (cfg);
12922 cfg->cbb = init_localsbb;
12923 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12926 MonoBasicBlock *bb;
12929 * Make seq points at backward branch targets interruptable.
12931 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12932 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12933 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12936 /* Add a sequence point for method entry/exit events */
12937 if (seq_points && cfg->gen_sdb_seq_points) {
12938 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12939 MONO_ADD_INS (init_localsbb, ins);
12940 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12941 MONO_ADD_INS (cfg->bb_exit, ins);
12945 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12946 * the code they refer to was dead (#11880).
12948 if (sym_seq_points) {
12949 for (i = 0; i < header->code_size; ++i) {
12950 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12953 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12954 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12961 if (cfg->method == method) {
12962 MonoBasicBlock *bb;
12963 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12964 if (bb == cfg->bb_init)
12967 bb->region = mono_find_block_region (cfg, bb->real_offset);
12969 mono_create_spvar_for_region (cfg, bb->region);
12970 if (cfg->verbose_level > 2)
12971 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12974 MonoBasicBlock *bb;
12975 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12976 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12977 bb->real_offset = inline_offset;
12981 if (inline_costs < 0) {
12984 /* Method is too large */
12985 mname = mono_method_full_name (method, TRUE);
12986 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12990 if ((cfg->verbose_level > 2) && (cfg->method == method))
12991 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12996 g_assert (!mono_error_ok (&cfg->error));
13000 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13004 set_exception_type_from_invalid_il (cfg, method, ip);
13008 g_slist_free (class_inits);
13009 mono_basic_block_free (original_bb);
13010 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13011 if (cfg->exception_type)
13014 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 * Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart of the
 * same access width, for the case where the stored value is a constant.
 * Any opcode without an immediate form trips the assert below.
 * NOTE(review): this view of the file is elided — switch framing lines are
 * not shown between the visible case/return pairs.
 */
13018 store_membase_reg_to_store_membase_imm (int opcode)
13021 case OP_STORE_MEMBASE_REG:
13022 return OP_STORE_MEMBASE_IMM;
13023 case OP_STOREI1_MEMBASE_REG:
13024 return OP_STOREI1_MEMBASE_IMM;
13025 case OP_STOREI2_MEMBASE_REG:
13026 return OP_STOREI2_MEMBASE_IMM;
13027 case OP_STOREI4_MEMBASE_REG:
13028 return OP_STOREI4_MEMBASE_IMM;
13029 case OP_STOREI8_MEMBASE_REG:
13030 return OP_STOREI8_MEMBASE_IMM;
/* Unsupported store opcode: hard failure rather than a fallback. */
13032 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 * Map a register-register opcode to the equivalent register-immediate
 * opcode, used when one operand is known to be a constant.
 * Covers 32 bit ALU ops, 64 bit ALU ops, compares, MEMBASE stores, and a
 * few x86/amd64-specific forms; the mapping falls through unchanged for
 * opcodes without an immediate variant (framing lines elided in this view).
 * NOTE(review): most `case` labels are elided here — only the `return`
 * lines of each arm are visible.
 */
13039 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops. */
13043 return OP_IADD_IMM;
13045 return OP_ISUB_IMM;
13047 return OP_IDIV_IMM;
13049 return OP_IDIV_UN_IMM;
13051 return OP_IREM_IMM;
13053 return OP_IREM_UN_IMM;
13055 return OP_IMUL_IMM;
13057 return OP_IAND_IMM;
13061 return OP_IXOR_IMM;
13063 return OP_ISHL_IMM;
13065 return OP_ISHR_IMM;
13067 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU ops. */
13070 return OP_LADD_IMM;
13072 return OP_LSUB_IMM;
13074 return OP_LAND_IMM;
13078 return OP_LXOR_IMM;
13080 return OP_LSHL_IMM;
13082 return OP_LSHR_IMM;
13084 return OP_LSHR_UN_IMM;
/* LREM_IMM only exists natively on 64 bit registers. */
13085 #if SIZEOF_REGISTER == 8
13087 return OP_LREM_IMM;
/* Compares. */
13091 return OP_COMPARE_IMM;
13093 return OP_ICOMPARE_IMM;
13095 return OP_LCOMPARE_IMM;
/* MEMBASE stores of a constant value. */
13097 case OP_STORE_MEMBASE_REG:
13098 return OP_STORE_MEMBASE_IMM;
13099 case OP_STOREI1_MEMBASE_REG:
13100 return OP_STOREI1_MEMBASE_IMM;
13101 case OP_STOREI2_MEMBASE_REG:
13102 return OP_STOREI2_MEMBASE_IMM;
13103 case OP_STOREI4_MEMBASE_REG:
13104 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms. */
13106 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13108 return OP_X86_PUSH_IMM;
13109 case OP_X86_COMPARE_MEMBASE_REG:
13110 return OP_X86_COMPARE_MEMBASE_IMM;
13112 #if defined(TARGET_AMD64)
13113 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13114 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Indirect call through a constant target becomes a direct call. */
13116 case OP_VOIDCALL_REG:
13117 return OP_VOIDCALL;
13125 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 * Map a CIL CEE_LDIND_* (load-indirect) opcode to the IR *_MEMBASE load
 * opcode of the matching width/signedness. Asserts on anything else.
 * NOTE(review): most `case CEE_LDIND_*` labels are elided in this view;
 * only CEE_LDIND_REF and the return lines are visible.
 */
13132 ldind_to_load_membase (int opcode)
13136 return OP_LOADI1_MEMBASE;
13138 return OP_LOADU1_MEMBASE;
13140 return OP_LOADI2_MEMBASE;
13142 return OP_LOADU2_MEMBASE;
13144 return OP_LOADI4_MEMBASE;
13146 return OP_LOADU4_MEMBASE;
13148 return OP_LOAD_MEMBASE;
/* Object references use the pointer-sized load. */
13149 case CEE_LDIND_REF:
13150 return OP_LOAD_MEMBASE;
13152 return OP_LOADI8_MEMBASE;
13154 return OP_LOADR4_MEMBASE;
13156 return OP_LOADR8_MEMBASE;
13158 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 * Map a CIL CEE_STIND_* (store-indirect) opcode to the IR *_MEMBASE_REG
 * store opcode of the matching width. Asserts on anything else.
 * NOTE(review): most `case CEE_STIND_*` labels are elided in this view.
 */
13165 stind_to_store_membase (int opcode)
13169 return OP_STOREI1_MEMBASE_REG;
13171 return OP_STOREI2_MEMBASE_REG;
13173 return OP_STOREI4_MEMBASE_REG;
/* Object references use the pointer-sized store. */
13175 case CEE_STIND_REF:
13176 return OP_STORE_MEMBASE_REG;
13178 return OP_STOREI8_MEMBASE_REG;
13180 return OP_STORER4_MEMBASE_REG;
13182 return OP_STORER8_MEMBASE_REG;
13184 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 * Map a *_MEMBASE load (base register + offset) to the corresponding
 * *_MEM load from an absolute address, on targets that support it
 * (currently x86/amd64 only — elsewhere the function presumably returns
 * the opcode/-1 unchanged; the fallthrough lines are elided in this view).
 */
13191 mono_load_membase_to_load_mem (int opcode)
13193 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13194 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13196 case OP_LOAD_MEMBASE:
13197 return OP_LOAD_MEM;
13198 case OP_LOADU1_MEMBASE:
13199 return OP_LOADU1_MEM;
13200 case OP_LOADU2_MEMBASE:
13201 return OP_LOADU2_MEM;
13202 case OP_LOADI4_MEMBASE:
13203 return OP_LOADI4_MEM;
13204 case OP_LOADU4_MEMBASE:
13205 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit registers. */
13206 #if SIZEOF_REGISTER == 8
13207 case OP_LOADI8_MEMBASE:
13208 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 * Given an ALU opcode whose destination is stored by STORE_OPCODE, return
 * the x86/amd64 read-modify-write *_MEMBASE_* opcode that operates
 * directly on memory, fusing the load/op/store sequence.
 * Only pointer-sized stores qualify (plus 8 byte stores on amd64); other
 * store widths bail out early.
 * NOTE(review): the `case` labels between the visible `return` lines are
 * elided in this view; the REG vs IMM grouping below follows the returned
 * opcode names.
 */
13217 op_to_op_dest_membase (int store_opcode, int opcode)
13219 #if defined(TARGET_X86)
/* Only fuse when the destination store is pointer-sized. */
13220 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13225 return OP_X86_ADD_MEMBASE_REG;
13227 return OP_X86_SUB_MEMBASE_REG;
13229 return OP_X86_AND_MEMBASE_REG;
13231 return OP_X86_OR_MEMBASE_REG;
13233 return OP_X86_XOR_MEMBASE_REG;
13236 return OP_X86_ADD_MEMBASE_IMM;
13239 return OP_X86_SUB_MEMBASE_IMM;
13242 return OP_X86_AND_MEMBASE_IMM;
13245 return OP_X86_OR_MEMBASE_IMM;
13248 return OP_X86_XOR_MEMBASE_IMM;
13254 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8 byte destination stores. */
13255 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* MEMBASE opcodes ... */
13260 return OP_X86_ADD_MEMBASE_REG;
13262 return OP_X86_SUB_MEMBASE_REG;
13264 return OP_X86_AND_MEMBASE_REG;
13266 return OP_X86_OR_MEMBASE_REG;
13268 return OP_X86_XOR_MEMBASE_REG;
13270 return OP_X86_ADD_MEMBASE_IMM;
13272 return OP_X86_SUB_MEMBASE_IMM;
13274 return OP_X86_AND_MEMBASE_IMM;
13276 return OP_X86_OR_MEMBASE_IMM;
13278 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64 bit (long) ops use the AMD64_* variants. */
13280 return OP_AMD64_ADD_MEMBASE_REG;
13282 return OP_AMD64_SUB_MEMBASE_REG;
13284 return OP_AMD64_AND_MEMBASE_REG;
13286 return OP_AMD64_OR_MEMBASE_REG;
13288 return OP_AMD64_XOR_MEMBASE_REG;
13291 return OP_AMD64_ADD_MEMBASE_IMM;
13294 return OP_AMD64_SUB_MEMBASE_IMM;
13297 return OP_AMD64_AND_MEMBASE_IMM;
13300 return OP_AMD64_OR_MEMBASE_IMM;
13303 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 * Fuse a setcc-style opcode with a following 1 byte store into a single
 * x86/amd64 SET*_MEMBASE instruction that writes the flag directly to
 * memory. Only 1 byte destination stores qualify.
 * NOTE(review): the `case` labels and the fallthrough return are elided
 * in this view.
 */
13313 op_to_op_store_membase (int store_opcode, int opcode)
13315 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13318 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13319 return OP_X86_SETEQ_MEMBASE;
13321 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13322 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 * Fold a memory load feeding the FIRST source operand of OPCODE into a
 * combined *_MEMBASE form of that opcode (x86/amd64 only), so the value
 * is read straight from memory instead of through a register.
 * The load width must match the operation width or the fold is rejected
 * (bail-out/fallthrough lines elided in this view).
 */
13330 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13333 /* FIXME: This has sign extension issues */
/* Special case: byte compare against an immediate. */
13335 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13336 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer/4 byte loads can be folded on x86. */
13339 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13344 return OP_X86_PUSH_MEMBASE;
13345 case OP_COMPARE_IMM:
13346 case OP_ICOMPARE_IMM:
13347 return OP_X86_COMPARE_MEMBASE_IMM;
13350 return OP_X86_COMPARE_MEMBASE_REG;
13354 #ifdef TARGET_AMD64
13355 /* FIXME: This has sign extension issues */
13357 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13358 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Pointer-sized push: only for true 8 byte loads (not ilp32). */
13363 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13364 return OP_X86_PUSH_MEMBASE;
/*
 * NOTE(review): the block below is inside a comment opened at the
 * "FIXME: This only works for 32 bit immediates" line — the immediate
 * compare folding on amd64 is intentionally disabled.
 */
13366 /* FIXME: This only works for 32 bit immediates
13367 case OP_COMPARE_IMM:
13368 case OP_LCOMPARE_IMM:
13369 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13370 return OP_AMD64_COMPARE_MEMBASE_IMM;
13372 case OP_ICOMPARE_IMM:
13373 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13374 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32 vs 64 bit form from the load width. */
13378 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13379 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13380 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13381 return OP_AMD64_COMPARE_MEMBASE_REG;
13384 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13385 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 * Fold a memory load feeding the SECOND source operand of OPCODE into a
 * combined *_REG_MEMBASE form (x86/amd64 only). The 32 bit vs 64 bit
 * variant is chosen from the load width; mismatches fall through
 * (bail-out lines elided in this view).
 * NOTE(review): the `case` labels between the visible `return` lines are
 * elided here.
 */
13394 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer/4 byte loads can be folded. */
13397 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13403 return OP_X86_COMPARE_REG_MEMBASE;
13405 return OP_X86_ADD_REG_MEMBASE;
13407 return OP_X86_SUB_REG_MEMBASE;
13409 return OP_X86_AND_REG_MEMBASE;
13411 return OP_X86_OR_REG_MEMBASE;
13413 return OP_X86_XOR_REG_MEMBASE;
13417 #ifdef TARGET_AMD64
/* 32 bit loads (or pointer loads under ilp32) use the 32 bit forms. */
13418 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13421 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13423 return OP_X86_ADD_REG_MEMBASE;
13425 return OP_X86_SUB_REG_MEMBASE;
13427 return OP_X86_AND_REG_MEMBASE;
13429 return OP_X86_OR_REG_MEMBASE;
13431 return OP_X86_XOR_REG_MEMBASE;
/* 8 byte loads (or pointer loads on lp64) use the 64 bit forms. */
13433 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13437 return OP_AMD64_COMPARE_REG_MEMBASE;
13439 return OP_AMD64_ADD_REG_MEMBASE;
13441 return OP_AMD64_SUB_REG_MEMBASE;
13443 return OP_AMD64_AND_REG_MEMBASE;
13445 return OP_AMD64_OR_REG_MEMBASE;
13447 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 * Like mono_op_to_op_imm (), but refuse the conversion for opcodes that
 * are software-emulated on this target (long shifts on 32 bit registers,
 * mul/div/rem under MONO_ARCH_EMULATE_*), since the emulation paths need
 * the register form. The excluded `case` labels are elided in this view;
 * everything else is delegated below.
 */
13456 mono_op_to_op_imm_noemul (int opcode)
13459 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13465 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13472 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13477 return mono_op_to_op_imm (opcode);
13482 * mono_handle_global_vregs:
13484 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 * Pass over all bblocks to (1) promote vregs referenced from more than
 * one bblock to real variables ("global" vregs), (2) demote variables
 * used in only one bblock back to local vregs, and (3) compact the
 * varinfo/vars tables after the demotions.
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num+1 = seen in exactly one
 * bblock, -1 = seen in multiple bblocks.
 */
13488 mono_handle_global_vregs (MonoCompile *cfg)
13490 gint32 *vreg_to_bb;
13491 MonoBasicBlock *bb;
/*
 * NOTE(review): `sizeof (gint32*)` looks like it was meant to be
 * `sizeof (gint32)` — harmless over-allocation on 64 bit, but worth
 * confirming/fixing upstream.
 */
13494 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13496 #ifdef MONO_ARCH_SIMD_INTRINSICS
13497 if (cfg->uses_simd_intrinsics)
13498 mono_simd_simplify_indirection (cfg);
13501 /* Find local vregs used in more than one bb */
13502 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13503 MonoInst *ins = bb->code;
13504 int block_num = bb->block_num;
13506 if (cfg->verbose_level > 2)
13507 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13510 for (; ins; ins = ins->next) {
13511 const char *spec = INS_INFO (ins->opcode);
13512 int regtype = 0, regindex;
13515 if (G_UNLIKELY (cfg->verbose_level > 2))
13516 mono_print_ins (ins);
/* CIL-level opcodes must already have been lowered to machine IR. */
13518 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk the instruction's dreg plus up to three sregs. */
13520 for (regindex = 0; regindex < 4; regindex ++) {
13523 if (regindex == 0) {
13524 regtype = spec [MONO_INST_DEST];
13525 if (regtype == ' ')
13528 } else if (regindex == 1) {
13529 regtype = spec [MONO_INST_SRC1];
13530 if (regtype == ' ')
13533 } else if (regindex == 2) {
13534 regtype = spec [MONO_INST_SRC2];
13535 if (regtype == ' ')
13538 } else if (regindex == 3) {
13539 regtype = spec [MONO_INST_SRC3];
13540 if (regtype == ' ')
13545 #if SIZEOF_REGISTER == 4
13546 /* In the LLVM case, the long opcodes are not decomposed */
13547 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13549 * Since some instructions reference the original long vreg,
13550 * and some reference the two component vregs, it is quite hard
13551 * to determine when it needs to be global. So be conservative.
13553 if (!get_vreg_to_inst (cfg, vreg)) {
13554 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13556 if (cfg->verbose_level > 2)
13557 printf ("LONG VREG R%d made global.\n", vreg);
13561 * Make the component vregs volatile since the optimizations can
13562 * get confused otherwise.
13564 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13565 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13569 g_assert (vreg != -1);
13571 prev_bb = vreg_to_bb [vreg];
13572 if (prev_bb == 0) {
13573 /* 0 is a valid block num */
13574 vreg_to_bb [vreg] = block_num + 1;
13575 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never promoted. */
13576 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13579 if (!get_vreg_to_inst (cfg, vreg)) {
13580 if (G_UNLIKELY (cfg->verbose_level > 2))
13581 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the regtype/spec character. */
13585 if (vreg_is_ref (cfg, vreg))
13586 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13588 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13591 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13594 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13598 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13601 g_assert_not_reached ();
13605 /* Flag as having been used in more than one bb */
13606 vreg_to_bb [vreg] = -1;
13612 /* If a variable is used in only one bblock, convert it into a local vreg */
13613 for (i = 0; i < cfg->num_varinfo; i++) {
13614 MonoInst *var = cfg->varinfo [i];
13615 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13617 switch (var->type) {
13623 #if SIZEOF_REGISTER == 8
13626 #if !defined(TARGET_X86)
13627 /* Enabling this screws up the fp stack on x86 */
13630 if (mono_arch_is_soft_float ())
13634 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13638 /* Arguments are implicitly global */
13639 /* Putting R4 vars into registers doesn't work currently */
13640 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13641 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13643 * Make that the variable's liveness interval doesn't contain a call, since
13644 * that would cause the lvreg to be spilled, making the whole optimization
13647 /* This is too slow for JIT compilation */
13649 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13651 int def_index, call_index, ins_index;
13652 gboolean spilled = FALSE;
13657 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13658 const char *spec = INS_INFO (ins->opcode);
13660 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13661 def_index = ins_index;
/*
 * NOTE(review): the second leg of this `||` repeats the SRC1/sreg1
 * test — it almost certainly should check MONO_INST_SRC2/ins->sreg2.
 * As written, uses via sreg2 are missed. Confirm against upstream
 * before changing.
 */
13663 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13664 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13665 if (call_index > def_index) {
13671 if (MONO_IS_CALL (ins))
13672 call_index = ins_index;
13682 if (G_UNLIKELY (cfg->verbose_level > 2))
13683 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg mapping. */
13684 var->flags |= MONO_INST_IS_DEAD;
13685 cfg->vreg_to_inst [var->dreg] = NULL;
13692 * Compress the varinfo and vars tables so the liveness computation is faster and
13693 * takes up less space.
13696 for (i = 0; i < cfg->num_varinfo; ++i) {
13697 MonoInst *var = cfg->varinfo [i];
13698 if (pos < i && cfg->locals_start == i)
13699 cfg->locals_start = pos;
13700 if (!(var->flags & MONO_INST_IS_DEAD)) {
13702 cfg->varinfo [pos] = cfg->varinfo [i];
13703 cfg->varinfo [pos]->inst_c0 = pos;
13704 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13705 cfg->vars [pos].idx = pos;
13706 #if SIZEOF_REGISTER == 4
13707 if (cfg->varinfo [pos]->type == STACK_I8) {
13708 /* Modify the two component vars too */
13711 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13712 var1->inst_c0 = pos;
13713 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13714 var1->inst_c0 = pos;
13721 cfg->num_varinfo = pos;
13722 if (cfg->locals_start > cfg->num_varinfo)
13723 cfg->locals_start = cfg->num_varinfo;
13727 * mono_allocate_gsharedvt_vars:
13729 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13730 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * mono_allocate_gsharedvt_vars:
 * Assign each variable with a gsharedvt (variable-size) type either a
 * slot index in the runtime locals area (stored +1 in
 * cfg->gsharedvt_vreg_to_idx, with the inst rewritten to
 * OP_GSHAREDVT_LOCAL), or -1 for arguments, which are rewritten to
 * OP_GSHAREDVT_ARG_REGOFFSET.
 */
13733 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13737 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13739 for (i = 0; i < cfg->num_varinfo; ++i) {
13740 MonoInst *ins = cfg->varinfo [i];
13743 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a slot in the gsharedvt locals area ... */
13744 if (i >= cfg->locals_start) {
13746 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* +1 so that 0 can mean "no gsharedvt mapping" for this vreg. */
13747 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13748 ins->opcode = OP_GSHAREDVT_LOCAL;
13749 ins->inst_imm = idx;
/* ... while arguments are marked -1 and accessed via reg offset. */
13752 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13753 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13760 * mono_spill_global_vars:
13762 * Generate spill code for variables which are not allocated to registers,
13763 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13764 * code is generated which could be optimized by the local optimization passes.
13767 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13769 MonoBasicBlock *bb;
13771 int orig_next_vreg;
13772 guint32 *vreg_to_lvreg;
13774 guint32 i, lvregs_len;
13775 gboolean dest_has_lvreg = FALSE;
13776 MonoStackType stacktypes [128];
13777 MonoInst **live_range_start, **live_range_end;
13778 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13780 *need_local_opts = FALSE;
13782 memset (spec2, 0, sizeof (spec2));
13784 /* FIXME: Move this function to mini.c */
13785 stacktypes ['i'] = STACK_PTR;
13786 stacktypes ['l'] = STACK_I8;
13787 stacktypes ['f'] = STACK_R8;
13788 #ifdef MONO_ARCH_SIMD_INTRINSICS
13789 stacktypes ['x'] = STACK_VTYPE;
13792 #if SIZEOF_REGISTER == 4
13793 /* Create MonoInsts for longs */
13794 for (i = 0; i < cfg->num_varinfo; i++) {
13795 MonoInst *ins = cfg->varinfo [i];
13797 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13798 switch (ins->type) {
13803 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13806 g_assert (ins->opcode == OP_REGOFFSET);
13808 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13810 tree->opcode = OP_REGOFFSET;
13811 tree->inst_basereg = ins->inst_basereg;
13812 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13814 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13816 tree->opcode = OP_REGOFFSET;
13817 tree->inst_basereg = ins->inst_basereg;
13818 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13828 if (cfg->compute_gc_maps) {
13829 /* registers need liveness info even for !non refs */
13830 for (i = 0; i < cfg->num_varinfo; i++) {
13831 MonoInst *ins = cfg->varinfo [i];
13833 if (ins->opcode == OP_REGVAR)
13834 ins->flags |= MONO_INST_GC_TRACK;
13838 /* FIXME: widening and truncation */
13841 * As an optimization, when a variable allocated to the stack is first loaded into
13842 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13843 * the variable again.
13845 orig_next_vreg = cfg->next_vreg;
13846 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13847 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13851 * These arrays contain the first and last instructions accessing a given
13853 * Since we emit bblocks in the same order we process them here, and we
13854 * don't split live ranges, these will precisely describe the live range of
13855 * the variable, i.e. the instruction range where a valid value can be found
13856 * in the variables location.
13857 * The live range is computed using the liveness info computed by the liveness pass.
13858 * We can't use vmv->range, since that is an abstract live range, and we need
13859 * one which is instruction precise.
13860 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13862 /* FIXME: Only do this if debugging info is requested */
13863 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13864 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13865 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13866 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13868 /* Add spill loads/stores */
13869 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13872 if (cfg->verbose_level > 2)
13873 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13875 /* Clear vreg_to_lvreg array */
13876 for (i = 0; i < lvregs_len; i++)
13877 vreg_to_lvreg [lvregs [i]] = 0;
13881 MONO_BB_FOR_EACH_INS (bb, ins) {
13882 const char *spec = INS_INFO (ins->opcode);
13883 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13884 gboolean store, no_lvreg;
13885 int sregs [MONO_MAX_SRC_REGS];
13887 if (G_UNLIKELY (cfg->verbose_level > 2))
13888 mono_print_ins (ins);
13890 if (ins->opcode == OP_NOP)
13894 * We handle LDADDR here as well, since it can only be decomposed
13895 * when variable addresses are known.
13897 if (ins->opcode == OP_LDADDR) {
13898 MonoInst *var = (MonoInst *)ins->inst_p0;
13900 if (var->opcode == OP_VTARG_ADDR) {
13901 /* Happens on SPARC/S390 where vtypes are passed by reference */
13902 MonoInst *vtaddr = var->inst_left;
13903 if (vtaddr->opcode == OP_REGVAR) {
13904 ins->opcode = OP_MOVE;
13905 ins->sreg1 = vtaddr->dreg;
13907 else if (var->inst_left->opcode == OP_REGOFFSET) {
13908 ins->opcode = OP_LOAD_MEMBASE;
13909 ins->inst_basereg = vtaddr->inst_basereg;
13910 ins->inst_offset = vtaddr->inst_offset;
13913 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13914 /* gsharedvt arg passed by ref */
13915 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13917 ins->opcode = OP_LOAD_MEMBASE;
13918 ins->inst_basereg = var->inst_basereg;
13919 ins->inst_offset = var->inst_offset;
13920 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13921 MonoInst *load, *load2, *load3;
13922 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13923 int reg1, reg2, reg3;
13924 MonoInst *info_var = cfg->gsharedvt_info_var;
13925 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13929 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13932 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13934 g_assert (info_var);
13935 g_assert (locals_var);
13937 /* Mark the instruction used to compute the locals var as used */
13938 cfg->gsharedvt_locals_var_ins = NULL;
13940 /* Load the offset */
13941 if (info_var->opcode == OP_REGOFFSET) {
13942 reg1 = alloc_ireg (cfg);
13943 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13944 } else if (info_var->opcode == OP_REGVAR) {
13946 reg1 = info_var->dreg;
13948 g_assert_not_reached ();
13950 reg2 = alloc_ireg (cfg);
13951 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13952 /* Load the locals area address */
13953 reg3 = alloc_ireg (cfg);
13954 if (locals_var->opcode == OP_REGOFFSET) {
13955 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13956 } else if (locals_var->opcode == OP_REGVAR) {
13957 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13959 g_assert_not_reached ();
13961 /* Compute the address */
13962 ins->opcode = OP_PADD;
13966 mono_bblock_insert_before_ins (bb, ins, load3);
13967 mono_bblock_insert_before_ins (bb, load3, load2);
13969 mono_bblock_insert_before_ins (bb, load2, load);
13971 g_assert (var->opcode == OP_REGOFFSET);
13973 ins->opcode = OP_ADD_IMM;
13974 ins->sreg1 = var->inst_basereg;
13975 ins->inst_imm = var->inst_offset;
13978 *need_local_opts = TRUE;
13979 spec = INS_INFO (ins->opcode);
13982 if (ins->opcode < MONO_CEE_LAST) {
13983 mono_print_ins (ins);
13984 g_assert_not_reached ();
13988 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13992 if (MONO_IS_STORE_MEMBASE (ins)) {
13993 tmp_reg = ins->dreg;
13994 ins->dreg = ins->sreg2;
13995 ins->sreg2 = tmp_reg;
13998 spec2 [MONO_INST_DEST] = ' ';
13999 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14000 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14001 spec2 [MONO_INST_SRC3] = ' ';
14003 } else if (MONO_IS_STORE_MEMINDEX (ins))
14004 g_assert_not_reached ();
14009 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14010 printf ("\t %.3s %d", spec, ins->dreg);
14011 num_sregs = mono_inst_get_src_registers (ins, sregs);
14012 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14013 printf (" %d", sregs [srcindex]);
14020 regtype = spec [MONO_INST_DEST];
14021 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14024 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14025 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14026 MonoInst *store_ins;
14028 MonoInst *def_ins = ins;
14029 int dreg = ins->dreg; /* The original vreg */
14031 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14033 if (var->opcode == OP_REGVAR) {
14034 ins->dreg = var->dreg;
14035 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14037 * Instead of emitting a load+store, use a _membase opcode.
14039 g_assert (var->opcode == OP_REGOFFSET);
14040 if (ins->opcode == OP_MOVE) {
14044 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14045 ins->inst_basereg = var->inst_basereg;
14046 ins->inst_offset = var->inst_offset;
14049 spec = INS_INFO (ins->opcode);
14053 g_assert (var->opcode == OP_REGOFFSET);
14055 prev_dreg = ins->dreg;
14057 /* Invalidate any previous lvreg for this vreg */
14058 vreg_to_lvreg [ins->dreg] = 0;
14062 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14064 store_opcode = OP_STOREI8_MEMBASE_REG;
14067 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14069 #if SIZEOF_REGISTER != 8
14070 if (regtype == 'l') {
14071 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14072 mono_bblock_insert_after_ins (bb, ins, store_ins);
14073 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14074 mono_bblock_insert_after_ins (bb, ins, store_ins);
14075 def_ins = store_ins;
14080 g_assert (store_opcode != OP_STOREV_MEMBASE);
14082 /* Try to fuse the store into the instruction itself */
14083 /* FIXME: Add more instructions */
14084 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14085 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14086 ins->inst_imm = ins->inst_c0;
14087 ins->inst_destbasereg = var->inst_basereg;
14088 ins->inst_offset = var->inst_offset;
14089 spec = INS_INFO (ins->opcode);
14090 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14091 ins->opcode = store_opcode;
14092 ins->inst_destbasereg = var->inst_basereg;
14093 ins->inst_offset = var->inst_offset;
14097 tmp_reg = ins->dreg;
14098 ins->dreg = ins->sreg2;
14099 ins->sreg2 = tmp_reg;
14102 spec2 [MONO_INST_DEST] = ' ';
14103 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14104 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14105 spec2 [MONO_INST_SRC3] = ' ';
14107 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14108 // FIXME: The backends expect the base reg to be in inst_basereg
14109 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14111 ins->inst_basereg = var->inst_basereg;
14112 ins->inst_offset = var->inst_offset;
14113 spec = INS_INFO (ins->opcode);
14115 /* printf ("INS: "); mono_print_ins (ins); */
14116 /* Create a store instruction */
14117 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14119 /* Insert it after the instruction */
14120 mono_bblock_insert_after_ins (bb, ins, store_ins);
14122 def_ins = store_ins;
14125 * We can't assign ins->dreg to var->dreg here, since the
14126 * sregs could use it. So set a flag, and do it after
14129 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14130 dest_has_lvreg = TRUE;
14135 if (def_ins && !live_range_start [dreg]) {
14136 live_range_start [dreg] = def_ins;
14137 live_range_start_bb [dreg] = bb;
14140 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14143 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14144 tmp->inst_c1 = dreg;
14145 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14152 num_sregs = mono_inst_get_src_registers (ins, sregs);
14153 for (srcindex = 0; srcindex < 3; ++srcindex) {
14154 regtype = spec [MONO_INST_SRC1 + srcindex];
14155 sreg = sregs [srcindex];
14157 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14158 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14159 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14160 MonoInst *use_ins = ins;
14161 MonoInst *load_ins;
14162 guint32 load_opcode;
14164 if (var->opcode == OP_REGVAR) {
14165 sregs [srcindex] = var->dreg;
14166 //mono_inst_set_src_registers (ins, sregs);
14167 live_range_end [sreg] = use_ins;
14168 live_range_end_bb [sreg] = bb;
14170 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14173 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14174 /* var->dreg is a hreg */
14175 tmp->inst_c1 = sreg;
14176 mono_bblock_insert_after_ins (bb, ins, tmp);
14182 g_assert (var->opcode == OP_REGOFFSET);
14184 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14186 g_assert (load_opcode != OP_LOADV_MEMBASE);
14188 if (vreg_to_lvreg [sreg]) {
14189 g_assert (vreg_to_lvreg [sreg] != -1);
14191 /* The variable is already loaded to an lvreg */
14192 if (G_UNLIKELY (cfg->verbose_level > 2))
14193 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14194 sregs [srcindex] = vreg_to_lvreg [sreg];
14195 //mono_inst_set_src_registers (ins, sregs);
14199 /* Try to fuse the load into the instruction */
14200 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14201 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14202 sregs [0] = var->inst_basereg;
14203 //mono_inst_set_src_registers (ins, sregs);
14204 ins->inst_offset = var->inst_offset;
14205 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14206 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14207 sregs [1] = var->inst_basereg;
14208 //mono_inst_set_src_registers (ins, sregs);
14209 ins->inst_offset = var->inst_offset;
14211 if (MONO_IS_REAL_MOVE (ins)) {
14212 ins->opcode = OP_NOP;
14215 //printf ("%d ", srcindex); mono_print_ins (ins);
14217 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14219 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14220 if (var->dreg == prev_dreg) {
14222 * sreg refers to the value loaded by the load
14223 * emitted below, but we need to use ins->dreg
14224 * since it refers to the store emitted earlier.
14228 g_assert (sreg != -1);
14229 vreg_to_lvreg [var->dreg] = sreg;
14230 g_assert (lvregs_len < 1024);
14231 lvregs [lvregs_len ++] = var->dreg;
14235 sregs [srcindex] = sreg;
14236 //mono_inst_set_src_registers (ins, sregs);
14238 #if SIZEOF_REGISTER != 8
14239 if (regtype == 'l') {
14240 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14241 mono_bblock_insert_before_ins (bb, ins, load_ins);
14242 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14243 mono_bblock_insert_before_ins (bb, ins, load_ins);
14244 use_ins = load_ins;
14249 #if SIZEOF_REGISTER == 4
14250 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14252 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14253 mono_bblock_insert_before_ins (bb, ins, load_ins);
14254 use_ins = load_ins;
14258 if (var->dreg < orig_next_vreg) {
14259 live_range_end [var->dreg] = use_ins;
14260 live_range_end_bb [var->dreg] = bb;
14263 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14266 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14267 tmp->inst_c1 = var->dreg;
14268 mono_bblock_insert_after_ins (bb, ins, tmp);
14272 mono_inst_set_src_registers (ins, sregs);
14274 if (dest_has_lvreg) {
14275 g_assert (ins->dreg != -1);
14276 vreg_to_lvreg [prev_dreg] = ins->dreg;
14277 g_assert (lvregs_len < 1024);
14278 lvregs [lvregs_len ++] = prev_dreg;
14279 dest_has_lvreg = FALSE;
14283 tmp_reg = ins->dreg;
14284 ins->dreg = ins->sreg2;
14285 ins->sreg2 = tmp_reg;
14288 if (MONO_IS_CALL (ins)) {
14289 /* Clear vreg_to_lvreg array */
14290 for (i = 0; i < lvregs_len; i++)
14291 vreg_to_lvreg [lvregs [i]] = 0;
14293 } else if (ins->opcode == OP_NOP) {
14295 MONO_INST_NULLIFY_SREGS (ins);
14298 if (cfg->verbose_level > 2)
14299 mono_print_ins_index (1, ins);
14302 /* Extend the live range based on the liveness info */
14303 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14304 for (i = 0; i < cfg->num_varinfo; i ++) {
14305 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14307 if (vreg_is_volatile (cfg, vi->vreg))
14308 /* The liveness info is incomplete */
14311 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14312 /* Live from at least the first ins of this bb */
14313 live_range_start [vi->vreg] = bb->code;
14314 live_range_start_bb [vi->vreg] = bb;
14317 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14318 /* Live at least until the last ins of this bb */
14319 live_range_end [vi->vreg] = bb->last_ins;
14320 live_range_end_bb [vi->vreg] = bb;
14327 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14328 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14330 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14331 for (i = 0; i < cfg->num_varinfo; ++i) {
14332 int vreg = MONO_VARINFO (cfg, i)->vreg;
14335 if (live_range_start [vreg]) {
14336 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14338 ins->inst_c1 = vreg;
14339 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14341 if (live_range_end [vreg]) {
14342 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14344 ins->inst_c1 = vreg;
14345 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14346 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14348 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14353 if (cfg->gsharedvt_locals_var_ins) {
14354 /* Nullify if unused */
14355 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14356 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14359 g_free (live_range_start);
14360 g_free (live_range_end);
14361 g_free (live_range_start_bb);
14362 g_free (live_range_end_bb);
14368 * - use 'iadd' instead of 'int_add'
14369 * - handling ovf opcodes: decompose in method_to_ir.
14370 * - unify iregs/fregs
14371 * -> partly done, the missing parts are:
14372 * - a more complete unification would involve unifying the hregs as well, so
14373 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14374 * would no longer map to the machine hregs, so the code generators would need to
14375 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14376 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14377 * fp/non-fp branches speeds it up by about 15%.
14378 * - use sext/zext opcodes instead of shifts
14380 * - get rid of TEMPLOADs if possible and use vregs instead
14381 * - clean up usage of OP_P/OP_ opcodes
14382 * - cleanup usage of DUMMY_USE
14383 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14385 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14386 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14387 * - make sure handle_stack_args () is called before the branch is emitted
14388 * - when the new IR is done, get rid of all unused stuff
14389 * - COMPARE/BEQ as separate instructions or unify them ?
14390 * - keeping them separate allows specialized compare instructions like
14391 * compare_imm, compare_membase
14392 * - most back ends unify fp compare+branch, fp compare+ceq
14393 * - integrate mono_save_args into inline_method
14394 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14395 * - handle long shift opts on 32 bit platforms somehow: they require
14396 * 3 sregs (2 for arg1 and 1 for arg2)
14397 * - make byref a 'normal' type.
14398 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14399 * variable if needed.
14400 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14401 * like inline_method.
14402 * - remove inlining restrictions
14403 * - fix LNEG and enable cfold of INEG
14404 * - generalize x86 optimizations like ldelema as a peephole optimization
14405 * - add store_mem_imm for amd64
14406 * - optimize the loading of the interruption flag in the managed->native wrappers
14407 * - avoid special handling of OP_NOP in passes
14408 * - move code inserting instructions into one function/macro.
14409 * - try a coalescing phase after liveness analysis
14410 * - add float -> vreg conversion + local optimizations on !x86
14411 * - figure out how to handle decomposed branches during optimizations, ie.
14412 * compare+branch, op_jump_table+op_br etc.
14413 * - promote RuntimeXHandles to vregs
14414 * - vtype cleanups:
14415 * - add a NEW_VARLOADA_VREG macro
14416 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14417 * accessing vtype fields.
14418 * - get rid of I8CONST on 64 bit platforms
14419 * - dealing with the increase in code size due to branches created during opcode
14421 * - use extended basic blocks
14422 * - all parts of the JIT
14423 * - handle_global_vregs () && local regalloc
14424 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14425 * - sources of increase in code size:
14428 * - isinst and castclass
14429 * - lvregs not allocated to global registers even if used multiple times
14430 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14432 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14433 * - add all micro optimizations from the old JIT
14434 * - put tree optimizations into the deadce pass
14435 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14436 * specific function.
14437 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14438 * fcompare + branchCC.
14439 * - create a helper function for allocating a stack slot, taking into account
14440 * MONO_CFG_HAS_SPILLUP.
14442 * - merge the ia64 switch changes.
14443 * - optimize mono_regstate2_alloc_int/float.
14444 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14445 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14446 * parts of the tree could be separated by other instructions, killing the tree
14447 * arguments, or stores killing loads etc. Also, should we fold loads into other
14448 * instructions if the result of the load is used multiple times ?
14449 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14450 * - LAST MERGE: 108395.
14451 * - when returning vtypes in registers, generate IR and append it to the end of the
14452 * last bb instead of doing it in the epilog.
14453 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14461 - When to decompose opcodes:
14462 - earlier: this makes some optimizations hard to implement, since the low level IR
14463 no longer contains the necessary information. But it is easier to do.
14464 - later: harder to implement, enables more optimizations.
14465 - Branches inside bblocks:
14466 - created when decomposing complex opcodes.
14467 - branches to another bblock: harmless, but not tracked by the branch
14468 optimizations, so need to branch to a label at the start of the bblock.
14469 - branches to inside the same bblock: very problematic, trips up the local
14470 reg allocator. Can be fixed by splitting the current bblock, but that is a
14471 complex operation, since some local vregs can become global vregs etc.
14472 - Local/global vregs:
14473 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14474 local register allocator.
14475 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14476 structure, created by mono_create_var (). Assigned to hregs or the stack by
14477 the global register allocator.
14478 - When to do optimizations like alu->alu_imm:
14479 - earlier -> saves work later on since the IR will be smaller/simpler
14480 - later -> can work on more instructions
14481 - Handling of valuetypes:
14482 - When a vtype is pushed on the stack, a new temporary is created, an
14483 instruction computing its address (LDADDR) is emitted and pushed on
14484 the stack. Need to optimize cases when the vtype is used immediately as in
14485 argument passing, stloc etc.
14486 - Instead of the to_end stuff in the old JIT, simply call the function handling
14487 the values on the stack before emitting the last instruction of the bb.
14490 #else /* !DISABLE_JIT */
14492 MONO_EMPTY_SOURCE_FILE (method_to_ir);
14494 #endif /* !DISABLE_JIT */