2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
30 #ifdef HAVE_SYS_TIME_H
38 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/debug-internals.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/utils/mono-error-internals.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
65 #include <mono/utils/mono-threads-coop.h>
71 #include "jit-icalls.h"
73 #include "debugger-agent.h"
74 #include "seq-points.h"
75 #include "aot-compiler.h"
76 #include "mini-llvm.h"
/* Heuristic cost assigned to a branch when sizing code for optimization decisions */
#define BRANCH_COST 10
/* Default upper bound (in IL size units) for methods considered for inlining */
#define INLINE_LENGTH_LIMIT 20
/*
 * Bail-out macros used all over the IR builder.  Each records a failure
 * condition on the current MonoCompile and jumps to the enclosing
 * exception_exit / mono_error_exit label, so they can only be used inside
 * functions that define those labels.
 *
 * NOTE(review): this chunk appears elided — the closing "} while (0)"
 * terminators of these macros are not visible here.
 */
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
#define GENERIC_SHARING_FAILURE(opcode) do { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
mono_error_set_out_of_memory (&cfg->error, ""); \
goto exception_exit; \
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
#define CHECK_CFG_ERROR do {\
if (!mono_error_ok (&cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);
/* Map a non-immediate opcode to its immediate variant (exported) */
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always);
emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
/* helper methods signatures */
/* Lazily-built icall signatures, initialized in mono_create_helper_signatures () */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
static MonoMethodSignature *helper_sig_jit_thread_attach;
static MonoMethodSignature *helper_sig_get_tls_tramp;
static MonoMethodSignature *helper_sig_set_tls_tramp;
/* type loading helpers */
/* Generate cached accessors for corlib classes looked up by namespace/name */
static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
/*
 * Per-opcode metadata tables, generated by including mini-ops.h with
 * different MINI_OP/MINI_OP3 expansions.  The second expansion computes,
 * for each opcode, the index of its last source register + 1.
 * NOTE(review): this chunk appears elided — the first table's declaration
 * and the #undef lines between the two expansions are not visible here.
 */
 * Instruction metadata
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
const gint8 ins_sreg_counts[] = {
#include "mini-ops.h"
/*
 * Thin exported wrappers around the static vreg allocators in mini.h.
 * NOTE(review): this chunk appears elided — return types and braces of
 * these one-line wrappers are not visible here.
 */
/* Allocate a fresh integer vreg */
mono_alloc_ireg (MonoCompile *cfg)
return alloc_ireg (cfg);
/* Allocate a fresh 64-bit (long) vreg */
mono_alloc_lreg (MonoCompile *cfg)
return alloc_lreg (cfg);
/* Allocate a fresh floating point vreg */
mono_alloc_freg (MonoCompile *cfg)
return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg */
mono_alloc_preg (MonoCompile *cfg)
return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for STACK_TYPE */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
return alloc_dreg (cfg, stack_type);
 * mono_alloc_ireg_ref:
 * Allocate an IREG, and mark it as holding a GC ref.
mono_alloc_ireg_ref (MonoCompile *cfg)
return alloc_ireg_ref (cfg);
 * mono_alloc_ireg_mp:
 * Allocate an IREG, and mark it as holding a managed pointer.
mono_alloc_ireg_mp (MonoCompile *cfg)
return alloc_ireg_mp (cfg);
 * mono_alloc_ireg_copy:
 * Allocate an IREG with the same GC type as VREG.
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) to use when
 * copying a value of TYPE between vregs.  Enums and generic instances are
 * reduced to their underlying/container type first; type variables recurse
 * through mini_get_underlying_type () under gsharing.
 * NOTE(review): this chunk appears elided — several case labels and
 * return statements of the switch are not visible here.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
/* r4fp selects a dedicated R4 move; otherwise R4 is widened to R8 regs */
return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
type = &type->data.generic_class->container_class->byval_arg;
g_assert (cfg->gshared);
if (mini_type_var_is_vt (type))
return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the basic block's in/out edge lists
 * (as "BB<num>(<dfn>)" pairs), then every instruction in the block.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
GString *str = g_string_new ("");
g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
g_string_append_printf (str, ", OUT: ");
for (i = 0; i < bb->out_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
g_string_append_printf (str, " ]\n");
g_print ("%s", str->str);
g_string_free (str, TRUE);
/* Dump each instruction in the block */
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Initialize the file-scope helper_sig_* icall signatures used when
 * emitting calls to runtime helpers.  The string encodes return type
 * followed by parameter types.
 */
mono_create_helper_signatures (void)
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook: when the 'break-on-unverified' debug option is set,
 * stop in the debugger whenever unverifiable IL is encountered.
 * NOTE(review): chunk appears elided — the breakpoint statement guarded
 * by the 'if' is not visible here.
 */
static MONO_NEVER_INLINE void
break_on_unverified (void)
if (mini_get_debug_options ()->break_on_unverified)
/*
 * field_access_failure:
 *
 *   Record a FieldAccessException on CFG for FIELD being inaccessible
 * from METHOD.  Out-of-line (never inlined) to keep the fast path small.
 */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
/* The full-name strings are heap allocated and owned here */
g_free (method_fname);
g_free (field_fname);
/*
 * inline_failure:
 *
 *   Record that inlining failed for the current method (reason in MSG),
 * optionally tracing the reason at verbosity >= 2.
 */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
401 static MONO_NEVER_INLINE void
402 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
404 if (cfg->verbose_level > 2) \
405 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
406 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record that gsharedvt compilation failed for the current method at
 * OPCODE.  Stores a diagnostic message on the cfg (FILE:LINE identify the
 * bailout site) and marks the compilation so a fallback can be used.
 */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * UNVERIFIED: bail out of IL verification failure.  GET_BBLOCK: look up
 * or create the basic block starting at IL offset IP.
 * NOTE(review): chunk appears elided — the closing lines of both macros
 * are not visible here.
 */
 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
break_on_unverified (); \
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
/*
 * EMIT_NEW_X86_LEA: emit an x86/amd64 LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh managed-pointer vreg (x86
 * targets only).  NOTE(review): the closing "} while (0)" and #endif are
 * elided from this chunk.
 */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
(dest)->backend.shift_amount = (shift); \
MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   If INS mixes R4/R8 operands, convert the R4 side to R8; on 64-bit
 * targets, sign-extend an I4 operand paired with a pointer-sized one.
 * The arg refs are updated in place so callers see the converted values.
 * NOTE(review): chunk appears elided — the enclosing condition of the
 * first if and some assignments are not visible here.
 */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
int dr = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
/*
 * Stack-machine emission macros used by the main IL decode loop.
 * ADD_BINOP/ADD_UNOP pop operand(s) from the eval stack (sp), type-check
 * via type_from_op (), allocate a dreg and push the (possibly decomposed)
 * result.  ADD_BINCOND emits a compare + conditional branch pair, wiring
 * true/false successors into the CFG.  All rely on locals of the decode
 * loop (cfg, sp, ins, ip, tblock, stack_start, ...).
 * NOTE(review): chunk appears elided — the closing "} while (0)" lines
 * are not visible here.
 */
#define ADD_BINOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
ins->sreg2 = sp [1]->dreg; \
type_from_op (cfg, ins, sp [0], sp [1]); \
/* Have to insert a widening op */ \
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode ((cfg), (ins)); \
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins); \
#define ADD_BINCOND(next_block) do { \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
type_from_op (cfg, cmp, sp [0], sp [1]); \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, cfg->cbb, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
GET_BBLOCK (cfg, tblock, ip); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
MONO_ADD_INS (cfg->cbb, cmp); \
MONO_ADD_INS (cfg->cbb, ins); \
 * link_bblock: Links two basic blocks
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * control flow transfers to after 'from'.  Idempotent: an existing
 * edge is detected and not duplicated; edge arrays are grown by one
 * via mempool allocation.
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
MonoBasicBlock **newa;
if (from->cil_code) {
/* NOTE(review): "IL%04x" vs "IL_%04x" below look inconsistent — verify upstream before normalizing */
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists */
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
/* Skip if the in-edge already exists */
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock () */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
link_bblock (cfg, from, to);
 * mono_find_block_region:
 * We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 * A region token that encodes where this region is, and information
 * about the clause owner for this block.
 * The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter.
mono_find_block_region (MonoCompile *cfg, int offset)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
/* First pass: handler (filter/finally/fault/catch) regions take priority */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try regions */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
/*
 * ip_in_finally_clause:
 *
 *   Return whether IL OFFSET falls inside a finally or fault handler of
 * the current method.
 */
ip_in_finally_clause (MonoCompile *cfg, int offset)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/* Only finally/fault clauses are of interest */
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE which contain IP but not
 * TARGET — i.e. the handlers a branch from IP to TARGET would leave.
 * Returns a GList of MonoExceptionClause* (appended in clause order).
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
if (clause->flags == type)
res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return (creating and caching on first use) the stack-pointer save
 * variable associated with the exception REGION.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception object variable for the handler at IL OFFSET, or NULL */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return (creating and caching on first use) the variable holding the
 * exception object for the handler at IL OFFSET.
 */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets INST->type (one of the STACK_* constants) and INST->klass from
 * TYPE.  Enums reduce to their base type; generic instances to their
 * container; type variables recurse under gsharing.
 * NOTE(review): chunk appears elided — several case labels and break
 * statements of the switch are not visible here.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
type = mini_get_underlying_type (type);
inst->klass = klass = mono_class_from_mono_type (type);
inst->type = STACK_MP;
switch (type->type) {
inst->type = STACK_INV;
inst->type = STACK_I4;
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
inst->type = STACK_I8;
inst->type = cfg->r4_stack_type;
inst->type = STACK_R8;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
inst->type = STACK_VTYPE;
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
g_assert (cfg->gshared);
if (mini_is_gsharedvt_type (type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
g_error ("unknown type 0x%02x in eval stack type", type->type);
 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Tables indexed by [src1 stack type][src2 stack type] giving the result
 * stack type of an operation (STACK_INV marks invalid IL), followed by
 * per-stack-type opcode offset maps.  Row/column order follows the
 * STACK_* enum: Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4.
 */
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
/* reduce the size of this table */
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Nonzero = comparable; the specific values distinguish verifiable/unverifiable combinations */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
/* reduce the size of this table */
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Central opcode-typing switch: combines the stack types of SRC1/SRC2
 * through the tables above, sets INS->type, and specializes the generic
 * opcode (e.g. CEE_ADD -> OP_IADD/OP_LADD/...) by adding the per-type
 * offset from the *_op_map tables.
 * NOTE(review): this chunk appears elided — many case labels and break
 * statements of the switch are not visible here.
 */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
switch (ins->opcode) {
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
ins->opcode = OP_ICOMPARE;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
ins->opcode += beqops_op_map [src1->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only combinations flagged 1 in bin_comp_table are valid */
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_R_UN;
ins->opcode = OP_LCONV_TO_R_UN;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
ins->type = STACK_PTR;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_MOVE;
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_FCONV_TO_U;
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_U8_UN:
case CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
case CEE_ADD_OVF_UN:
case CEE_MUL_OVF_UN:
case CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floating point */
if (ins->type == STACK_R8)
ins->type = STACK_INV;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack carry object_class as a conservative klass */
if (ins->type == STACK_MP)
ins->klass = mono_defaults.object_class;
/* Result stack types for ldind.* opcodes, indexed from CEE_LDIND_I1 */
STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* NOTE(review): initializer elided from this chunk */
param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the stack types of ARGS are compatible with SIG's
 * parameter types (byref-ness, reference kinds, R4/R8 for floats).
 * NOTE(review): chunk appears elided — return statements and several
 * case labels are not visible here.
 */
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
switch (args->type) {
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
if (!sig->params [i]->byref)
if (sig->params [i]->byref)
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (sig->params [i]->byref)
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
/*if (!param_table [args [i].type] [sig->params [i]->type])
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
/* Created lazily on first use */
if (!cfg->domainvar)
cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->domainvar;
 * The got_var contains the address of the Global Offset Table when AOT
/* Returns the (lazily created) GOT variable, or NULL when not AOT compiling
 * or the backend does not need one. */
mono_get_got_var (MonoCompile *cfg)
if (!cfg->compile_aot || !cfg->backend->need_got_var)
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Return the (lazily created) rgctx variable; only valid for
 * generic-shared compilations.
 */
mono_get_vtable_var (MonoCompile *cfg)
g_assert (cfg->gshared);
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * For object/vtype/managed-pointer entries the result depends on ins->klass.
 */
1278 type_from_stack_type (MonoInst *ins) {
1279 switch (ins->type) {
1280 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1281 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1282 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1283 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1284 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref ('this_arg') form of the pointed-to class. */
1286 return &ins->klass->this_arg;
1287 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1288 case STACK_VTYPE: return &ins->klass->byval_arg;
1290 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 * Map a MonoType to the evaluation-stack type (STACK_*) it occupies.
 * NOTE(review): numbering gaps show most case labels/returns are elided in
 * this extract; only the reference-type and valuetype paths are visible.
 */
1295 static G_GNUC_UNUSED int
1296 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. before dispatching on the underlying type code. */
1298 t = mono_type_get_underlying_type (t);
1310 case MONO_TYPE_FNPTR:
1312 case MONO_TYPE_CLASS:
1313 case MONO_TYPE_STRING:
1314 case MONO_TYPE_OBJECT:
1315 case MONO_TYPE_SZARRAY:
1316 case MONO_TYPE_ARRAY:
/* R4 maps to either STACK_R4 or STACK_R8 depending on backend config. */
1322 return cfg->r4_stack_type;
1325 case MONO_TYPE_VALUETYPE:
1326 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are vtypes only when the instantiation is a valuetype. */
1328 case MONO_TYPE_GENERICINST:
1329 if (mono_type_generic_inst_is_valuetype (t))
1335 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 * Map a CIL ldelem.*/stelem.* opcode to the MonoClass of the element type
 * it accesses. NOTE(review): the case labels for the non-REF opcodes are
 * elided in this extract (numbering gaps); only the returns are visible.
 */
1342 array_access_to_klass (int opcode)
1346 return mono_defaults.byte_class;
1348 return mono_defaults.uint16_class;
1351 return mono_defaults.int_class;
1354 return mono_defaults.sbyte_class;
1357 return mono_defaults.int16_class;
1360 return mono_defaults.int32_class;
1362 return mono_defaults.uint32_class;
1365 return mono_defaults.int64_class;
1368 return mono_defaults.single_class;
1371 return mono_defaults.double_class;
1372 case CEE_LDELEM_REF:
1373 case CEE_STELEM_REF:
1374 return mono_defaults.object_class;
1376 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 * Return a local variable for stack SLOT holding a value like INS, sharing
 * previously-created temporaries (cached in cfg->intvars keyed by
 * stack-type + slot) when possible.
 */
1382 * We try to share variables when possible
1385 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1390 /* inlining can result in deeper stacks */
/* Beyond the declared max stack there is no cache slot; always create. */
1391 if (slot >= cfg->header->max_stack)
1392 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1394 pos = ins->type - 1 + slot * STACK_MAX;
1396 switch (ins->type) {
/* Reuse the cached variable if one was already created for this slot/type. */
1403 if ((vnum = cfg->intvars [pos]))
1404 return cfg->varinfo [vnum];
1405 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1406 cfg->intvars [pos] = res->inst_c0;
1409 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 * When AOT compiling, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash so the AOT compiler can look the item up later.
 */
1415 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1418 * Don't use this if a generic_context is set, since that means AOT can't
1419 * look up the method using just the image+token.
1420 * table == 0 means this is a reference made from a wrapper.
1422 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1423 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1424 jump_info_token->image = image;
1425 jump_info_token->token = token;
1426 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1431 * This function is called to handle items that are left on the evaluation stack
1432 * at basic block boundaries. What happens is that we save the values to local variables
1433 * and we reload them later when first entering the target basic block (with the
1434 * handle_loaded_temps () function).
1435 * A single joint point will use the same variables (stored in the array bb->out_stack or
1436 * bb->in_stack, if the basic block is before or after the joint point).
1438 * This function needs to be called _before_ emitting the last instruction of
1439 * the bb (i.e. before emitting a branch).
1440 * If the stack merge fails at a join point, cfg->unverifiable is set.
1443 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1446 MonoBasicBlock *bb = cfg->cbb;
1447 MonoBasicBlock *outb;
1448 MonoInst *inst, **locals;
1453 if (cfg->verbose_level > 3)
1454 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables carry the stack values. */
1455 if (!bb->out_scount) {
1456 bb->out_scount = count;
1457 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack already assigned to one of our successors. */
1459 for (i = 0; i < bb->out_count; ++i) {
1460 outb = bb->out_bb [i];
1461 /* exception handlers are linked, but they should not be considered for stack args */
1462 if (outb->flags & BB_EXCEPTION_HANDLER)
1464 //printf (" %d", outb->block_num);
1465 if (outb->in_stack) {
1467 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh variables. */
1473 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1474 for (i = 0; i < count; ++i) {
1476 * try to reuse temps already allocated for this purpouse, if they occupy the same
1477 * stack slot and if they are of the same type.
1478 * This won't cause conflicts since if 'local' is used to
1479 * store one of the values in the in_stack of a bblock, then
1480 * the same variable will be used for the same outgoing stack
1482 * This doesn't work when inlining methods, since the bblocks
1483 * in the inlined methods do not inherit their in_stack from
1484 * the bblock they are inlined to. See bug #58863 for an
1487 if (cfg->inlined_method)
1488 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1490 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor that lacks an in_stack; verify
 * depth agreement where one already exists (mismatch => unverifiable IL). */
1495 for (i = 0; i < bb->out_count; ++i) {
1496 outb = bb->out_bb [i];
1497 /* exception handlers are linked, but they should not be considered for stack args */
1498 if (outb->flags & BB_EXCEPTION_HANDLER)
1500 if (outb->in_scount) {
1501 if (outb->in_scount != bb->out_scount) {
1502 cfg->unverifiable = TRUE;
1505 continue; /* check they are the same locals */
1507 outb->in_scount = count;
1508 outb->in_stack = bb->out_stack;
1511 locals = bb->out_stack;
/* Spill the current stack values into the chosen locals and replace the
 * stack entries with the locals, so successors reload from them. */
1513 for (i = 0; i < count; ++i) {
1514 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1515 inst->cil_code = sp [i]->cil_code;
1516 sp [i] = locals [i];
1517 if (cfg->verbose_level > 3)
1518 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1522 * It is possible that the out bblocks already have in_stack assigned, and
1523 * the in_stacks differ. In this case, we will store to all the different
1530 /* Find a bblock which has a different in_stack */
1532 while (bindex < bb->out_count) {
1533 outb = bb->out_bb [bindex];
1534 /* exception handlers are linked, but they should not be considered for stack args */
1535 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* Successor uses a different variable set: emit stores into its in_stack too. */
1539 if (outb->in_stack != locals) {
1540 for (i = 0; i < count; ++i) {
1541 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1542 inst->cil_code = sp [i]->cil_code;
1543 sp [i] = locals [i];
1544 if (cfg->verbose_level > 3)
1545 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1547 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA).
 * Under AOT this becomes a patchable AOT constant; under JIT the patch is
 * resolved immediately and emitted as a plain pointer constant.
 */
1557 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1561 if (cfg->compile_aot) {
1562 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT path: resolve the patch target now and embed the resulting pointer. */
1568 ji.type = patch_type;
1569 ji.data.target = data;
1570 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1571 mono_error_assert_ok (&error);
1573 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around emit_runtime_constant () for use outside this file. */
1579 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1581 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 *
 * Emit inline IR that sets SIZE bytes at DESTREG+OFFSET to VAL (only 0 is
 * supported, asserted below), honoring ALIGN. Small aligned sizes use a
 * single immediate store; larger regions loop over register-sized stores,
 * falling back to 4/2/1-byte stores for the tail.
 */
1585 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1589 g_assert (val == 0);
/* Fast path: the whole region fits in one naturally-aligned store. */
1594 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1597 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1600 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1603 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1605 #if SIZEOF_REGISTER == 8
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register and store it repeatedly. */
1613 val_reg = alloc_preg (cfg);
1615 if (SIZEOF_REGISTER == 8)
1616 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1618 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until alignment (or size) is reached. */
1621 /* This could be optimized further if neccesary */
1623 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* On 64-bit targets allowing unaligned access, emit 4-byte then 8-byte runs. */
1630 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1632 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: 4-, 2-, then 1-byte stores for whatever remains. */
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET
 * with the given ALIGN: a byte-wise prefix until aligned, register-sized
 * load/store pairs for the bulk, then 4/2/1-byte copies for the tail.
 */
1661 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1668 /*FIXME arbitrary hack to avoid unbound code expansion.*/
/* Inline expansion emits one load+store per chunk; cap SIZE to keep the
 * generated code bounded. */
1669 g_assert (size < 10000);
/* Unaligned prefix: copy single bytes until alignment (or size) runs out. */
1672 /* This could be optimized further if neccesary */
1674 cur_reg = alloc_preg (cfg);
1675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 64-bit bulk copy where the backend tolerates unaligned 8-byte accesses. */
1683 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1685 cur_reg = alloc_preg (cfg);
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail copies: 4-byte, 2-byte, then 1-byte chunks for the remainder. */
1695 cur_reg = alloc_preg (cfg);
1696 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1703 cur_reg = alloc_preg (cfg);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1711 cur_reg = alloc_preg (cfg);
1712 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 *
 * Create (but do not insert) an OP_TLS_GET instruction reading TLS slot KEY
 * directly via its offset. Returns NULL under AOT, or when no fast TLS
 * offset/arch support is available — callers must then use a fallback.
 */
1721 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1723 int tls_offset = mono_tls_get_tls_offset (key);
/* TLS offsets are process-specific, so they cannot be baked into AOT code. */
1725 if (cfg->compile_aot)
1728 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1730 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1731 ins->dreg = mono_alloc_preg (cfg);
1732 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 *
 * Create (but do not insert) an OP_TLS_SET instruction writing VALUE into
 * TLS slot KEY via its offset. Returns NULL under AOT or when fast TLS is
 * unavailable — mirrors mono_create_fast_tls_getter ().
 */
1739 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1741 int tls_offset = mono_tls_get_tls_offset (key);
/* TLS offsets cannot be baked into AOT images. */
1743 if (cfg->compile_aot)
1746 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1748 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1749 ins->sreg1 = value->dreg;
1750 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 *
 * Emit IR that reads TLS slot KEY. Tries the fast inline path first, then
 * falls back to an indirect call through a TLS trampoline (AOT) or a direct
 * icall to the slot's getter (JIT).
 */
1758 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1760 MonoInst *fast_tls = NULL;
/* use_fallback_tls is a debug option forcing the slow path. */
1762 if (!mini_get_debug_options ()->use_fallback_tls)
1763 fast_tls = mono_create_fast_tls_getter (cfg, key);
1766 MONO_ADD_INS (cfg->cbb, fast_tls);
1770 if (cfg->compile_aot) {
1773 * tls getters are critical pieces of code and we don't want to resolve them
1774 * through the standard plt/tramp mechanism since we might expose ourselves
1775 * to crashes and infinite recursions.
1777 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1778 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
/* JIT fallback: call the resolved getter function directly. */
1780 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1781 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 *
 * Emit IR that writes VALUE into TLS slot KEY. Mirrors mono_create_tls_get:
 * fast inline path when available, otherwise a trampoline call (AOT) or a
 * direct icall to the slot's setter (JIT).
 */
1786 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1788 MonoInst *fast_tls = NULL;
1790 if (!mini_get_debug_options ()->use_fallback_tls)
1791 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1794 MONO_ADD_INS (cfg->cbb, fast_tls);
1798 if (cfg->compile_aot) {
1800 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1801 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
/* JIT fallback: call the resolved setter function directly. */
1803 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1804 return mono_emit_jit_icall (cfg, setter, &value);
1811 * Emit IR to push the current LMF onto the LMF stack.
1814 emit_push_lmf (MonoCompile *cfg)
1817 * Emit IR to push the LMF:
1818 * lmf_addr = <lmf_addr from tls>
1819 * lmf->lmf_addr = lmf_addr
1820 * lmf->prev_lmf = *lmf_addr
1823 MonoInst *ins, *lmf_ins;
1828 int lmf_reg, prev_lmf_reg;
1830 * Store lmf_addr in a variable, so it can be allocated to a global register.
1832 if (!cfg->lmf_addr_var)
1833 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from the jit_tls block (base + lmf offset)... */
1836 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1838 int jit_tls_dreg = ins->dreg;
1840 lmf_reg = alloc_preg (cfg);
1841 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...the other reads the dedicated TLS_KEY_LMF_ADDR slot directly. */
1843 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1846 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Address of this frame's LMF structure (a stack-allocated cfg->lmf_var). */
1848 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1849 lmf_reg = ins->dreg;
1851 prev_lmf_reg = alloc_preg (cfg);
1852 /* Save previous_lmf */
1853 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1854 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this LMF the new top of the LMF stack: *lmf_addr = &lmf. */
1856 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1862 * Emit IR to pop the current LMF from the LMF stack.
1865 emit_pop_lmf (MonoCompile *cfg)
1867 int lmf_reg, lmf_addr_reg;
1873 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1874 lmf_reg = ins->dreg;
1878 * Emit IR to pop the LMF:
1879 * *(lmf->lmf_addr) = lmf->prev_lmf
1881 /* This could be called before emit_push_lmf () */
1882 if (!cfg->lmf_addr_var)
1883 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1884 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Restore the saved previous_lmf as the top of the LMF stack. */
1886 prev_lmf_reg = alloc_preg (cfg);
1887 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1888 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 * Emit a profiler enter/leave icall to FUNC, passing the current method as
 * the single argument. Only emitted when enter/leave profiling is enabled
 * and we are compiling the outermost (non-inlined) method.
 */
1892 emit_instrumentation_call (MonoCompile *cfg, void *func)
1894 MonoInst *iargs [1];
1897 * Avoid instrumenting inlined methods since it can
1898 * distort profiling results.
1900 if (cfg->method != cfg->current_method)
1903 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1904 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1905 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 * Select the call opcode family for a call returning TYPE: the base family
 * from the return kind (VOID/int/long/float/vtype...), then the variant from
 * the call form — CALLI selects *_REG, VIRT selects *_MEMBASE, else direct.
 */
1910 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1913 type = mini_get_underlying_type (type);
1914 switch (type->type) {
1915 case MONO_TYPE_VOID:
1916 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1923 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1927 case MONO_TYPE_FNPTR:
1928 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* All reference types return through the plain integer CALL family. */
1929 case MONO_TYPE_CLASS:
1930 case MONO_TYPE_STRING:
1931 case MONO_TYPE_OBJECT:
1932 case MONO_TYPE_SZARRAY:
1933 case MONO_TYPE_ARRAY:
1934 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1937 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1940 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1942 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1944 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1945 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
1946 if (type->data.klass->enumtype) {
1947 type = mono_class_enum_basetype (type->data.klass);
1950 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1951 case MONO_TYPE_TYPEDBYREF:
1952 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
/* Generic instances re-dispatch on their container class's byval type. */
1953 case MONO_TYPE_GENERICINST:
1954 type = &type->data.generic_class->container_class->byval_arg;
1957 case MONO_TYPE_MVAR:
1959 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1961 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* True for primitive scalar type codes: BOOLEAN..U8 plus native I/U. */
1966 //XXX this ignores if t is byref
1967 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1970 * target_type_is_incompatible:
1971 * @cfg: MonoCompile context
1973 * Check that the item @arg on the evaluation stack can be stored
1974 * in the target type (can be a local, or field, etc).
1975 * The cfg arg can be used to check if we need verification or just
1978 * Returns: non-0 value if arg can't be stored on a target.
1981 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1983 MonoType *simple_type;
/* Byref targets: accept a managed pointer whose pointee matches (after
 * lowering gshared/enum wrappers), or a compatible primitive byref. */
1986 if (target->byref) {
1987 /* FIXME: check that the pointed to types match */
1988 if (arg->type == STACK_MP) {
1989 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1990 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1991 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1993 /* if the target is native int& or same type */
1994 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1997 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1998 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1999 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2003 if (arg->type == STACK_PTR)
/* Non-byref targets: dispatch on the underlying (lowered) type code. */
2008 simple_type = mini_get_underlying_type (target);
2009 switch (simple_type->type) {
2010 case MONO_TYPE_VOID:
2018 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2022 /* STACK_MP is needed when setting pinned locals */
2023 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2028 case MONO_TYPE_FNPTR:
2030 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2031 * in native int. (#688008).
2033 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2036 case MONO_TYPE_CLASS:
2037 case MONO_TYPE_STRING:
2038 case MONO_TYPE_OBJECT:
2039 case MONO_TYPE_SZARRAY:
2040 case MONO_TYPE_ARRAY:
2041 if (arg->type != STACK_OBJ)
2043 /* FIXME: check type compatibility */
2047 if (arg->type != STACK_I8)
2051 if (arg->type != cfg->r4_stack_type)
2055 if (arg->type != STACK_R8)
/* Value types must match both the stack kind and the exact class. */
2058 case MONO_TYPE_VALUETYPE:
2059 if (arg->type != STACK_VTYPE)
2061 klass = mono_class_from_mono_type (simple_type);
2062 if (klass != arg->klass)
2065 case MONO_TYPE_TYPEDBYREF:
2066 if (arg->type != STACK_VTYPE)
2068 klass = mono_class_from_mono_type (simple_type);
2069 if (klass != arg->klass)
2072 case MONO_TYPE_GENERICINST:
2073 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2074 MonoClass *target_class;
2075 if (arg->type != STACK_VTYPE)
2077 klass = mono_class_from_mono_type (simple_type);
2078 target_class = mono_class_from_mono_type (target);
2079 /* The second cases is needed when doing partial sharing */
2080 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2084 if (arg->type != STACK_OBJ)
2086 /* FIXME: check type compatibility */
/* Generic type variables only reach here under gshared compilation. */
2090 case MONO_TYPE_MVAR:
2091 g_assert (cfg->gshared);
2092 if (mini_type_var_is_vt (simple_type)) {
2093 if (arg->type != STACK_VTYPE)
2096 if (arg->type != STACK_OBJ)
2101 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2107 * Prepare arguments for passing to a function call.
2108 * Return a non-zero value if the arguments can't be passed to the given
2110 * The type checks are not yet complete and some conversions may need
2111 * casts on 32 or 64 bit architectures.
2113 * FIXME: implement this using target_type_is_incompatible ()
2116 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2118 MonoType *simple_type;
/* The 'this' argument must be an object, managed pointer, or native int. */
2122 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2126 for (i = 0; i < sig->param_count; ++i) {
/* Byref parameters accept only managed/unmanaged pointers. */
2127 if (sig->params [i]->byref) {
2128 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2132 simple_type = mini_get_underlying_type (sig->params [i]);
2134 switch (simple_type->type) {
2135 case MONO_TYPE_VOID:
2144 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2150 case MONO_TYPE_FNPTR:
2151 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2154 case MONO_TYPE_CLASS:
2155 case MONO_TYPE_STRING:
2156 case MONO_TYPE_OBJECT:
2157 case MONO_TYPE_SZARRAY:
2158 case MONO_TYPE_ARRAY:
2159 if (args [i]->type != STACK_OBJ)
2164 if (args [i]->type != STACK_I8)
2168 if (args [i]->type != cfg->r4_stack_type)
2172 if (args [i]->type != STACK_R8)
2175 case MONO_TYPE_VALUETYPE:
/* Enums are checked against their underlying integral type. */
2176 if (simple_type->data.klass->enumtype) {
2177 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2180 if (args [i]->type != STACK_VTYPE)
2183 case MONO_TYPE_TYPEDBYREF:
2184 if (args [i]->type != STACK_VTYPE)
/* Generic instances re-dispatch on the container class's byval type. */
2187 case MONO_TYPE_GENERICINST:
2188 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2191 case MONO_TYPE_MVAR:
2193 if (args [i]->type != STACK_VTYPE)
2197 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Convert a *_MEMBASE (virtual) call opcode into its direct-call
 * counterpart. NOTE(review): the per-case return statements are elided in
 * this extract (numbering gaps).
 */
2205 callvirt_to_call (int opcode)
2208 case OP_CALL_MEMBASE:
2210 case OP_VOIDCALL_MEMBASE:
2212 case OP_FCALL_MEMBASE:
2214 case OP_RCALL_MEMBASE:
2216 case OP_VCALL_MEMBASE:
2218 case OP_LCALL_MEMBASE:
/* Any other opcode is a caller bug. */
2221 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 * Convert a *_MEMBASE (virtual) call opcode into its indirect (*_REG)
 * counterpart, used for calls through a computed address.
 */
2228 callvirt_to_call_reg (int opcode)
2231 case OP_CALL_MEMBASE:
2233 case OP_VOIDCALL_MEMBASE:
2234 return OP_VOIDCALL_REG;
2235 case OP_FCALL_MEMBASE:
2236 return OP_FCALL_REG;
2237 case OP_RCALL_MEMBASE:
2238 return OP_RCALL_REG;
2239 case OP_VCALL_MEMBASE:
2240 return OP_VCALL_REG;
2241 case OP_LCALL_MEMBASE:
2242 return OP_LCALL_REG;
/* Any other opcode is a caller bug. */
2244 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 * Arrange for the IMT argument (either an explicit IMT_ARG value or a
 * method constant built from METHOD) to be passed in MONO_ARCH_IMT_REG for
 * CALL. LLVM and non-LLVM paths differ only in how the register is wired up.
 */
2250 /* Either METHOD or IMT_ARG needs to be set */
2252 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2256 if (COMPILE_LLVM (cfg)) {
/* Copy the provided IMT value into a fresh register... */
2258 method_reg = alloc_preg (cfg);
2259 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* ...or materialize METHOD as a runtime constant. */
2261 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2262 method_reg = ins->dreg;
2266 call->imt_arg_reg = method_reg;
2268 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same logic, register wired via outarg only. */
2273 method_reg = alloc_preg (cfg);
2274 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2276 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2277 method_reg = ins->dreg;
2280 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo patch descriptor from MP and fill in its target.
 * NOTE(review): the lines setting the remaining fields and the return are
 * elided in this extract (numbering gaps).
 */
2283 static MonoJumpInfo *
2284 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2286 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2290 ji->data.target = target;
/* Thin wrapper: delegate the class context-used check to the metadata layer. */
2296 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2299 return mono_class_check_context_used (klass);
/* Thin wrapper: delegate the method context-used check to the metadata layer. */
2305 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2308 return mono_method_check_context_used (method);
2314 * check_method_sharing:
2316 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2319 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2321 gboolean pass_vtable = FALSE;
2322 gboolean pass_mrgctx = FALSE;
/* Vtable candidates: static or valuetype methods on generic classes. */
2324 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2325 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2326 gboolean sharable = FALSE;
2328 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2332 * Pass vtable iff target method might
2333 * be shared, which means that sharing
2334 * is enabled for its class and its
2335 * context is sharable (and it's not a
2338 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) take an mrgctx instead of a vtable. */
2342 if (mini_method_get_context (cmethod) &&
2343 mini_method_get_context (cmethod)->method_inst) {
2344 g_assert (!pass_vtable);
2346 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2349 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Results are returned through the optional out parameters. */
2354 if (out_pass_vtable)
2355 *out_pass_vtable = pass_vtable;
2356 if (out_pass_mrgctx)
2357 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI/VIRTUAL_/TAIL select the call form; RGCTX marks an rgctx-carrying
 * call and UNBOX_TRAMPOLINE requests an unboxing entry point. Handles vtype
 * returns (vret_var / OP_OUTARG_VTRETADDR) and the soft-float R4 fixup.
 */
2360 inline static MonoCallInst *
2361 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2362 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2366 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls count as a method leave for enter/leave profiling. */
2374 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2376 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2378 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2381 call->signature = sig;
2382 call->rgctx_reg = rgctx;
2383 sig_ret = mini_get_underlying_type (sig->ret);
2385 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Vtype return, first form: reuse the method's vret_addr variable. */
2388 if (mini_type_is_vtype (sig_ret)) {
2389 call->vret_var = cfg->vret_addr;
2390 //g_assert_not_reached ();
/* Vtype return, second form: allocate a temp and pass its address. */
2392 } else if (mini_type_is_vtype (sig_ret)) {
2393 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2396 temp->backend.is_pinvoke = sig->pinvoke;
2399 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2400 * address of return value to increase optimization opportunities.
2401 * Before vtype decomposition, the dreg of the call ins itself represents the
2402 * fact the call modifies the return value. After decomposition, the call will
2403 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2404 * will be transformed into an LDADDR.
2406 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2407 loada->dreg = alloc_preg (cfg);
2408 loada->inst_p0 = temp;
2409 /* We reference the call too since call->dreg could change during optimization */
2410 loada->inst_p1 = call;
2411 MONO_ADD_INS (cfg->cbb, loada);
2413 call->inst.dreg = temp->dreg;
2415 call->vret_var = loada;
/* Scalar (non-void) return: the call's dreg holds the result. */
2416 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2417 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2419 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2420 if (COMPILE_SOFT_FLOAT (cfg)) {
2422 * If the call has a float argument, we would need to do an r8->r4 conversion using
2423 * an icall, but that cannot be done during the call sequence since it would clobber
2424 * the call registers + the stack. So we do it before emitting the call.
2426 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2428 MonoInst *in = call->args [i];
/* The implicit 'this' (index 0 when hasthis) is typed as native int. */
2430 if (i >= sig->hasthis)
2431 t = sig->params [i - sig->hasthis];
2433 t = &mono_defaults.int_class->byval_arg;
2434 t = mono_type_get_underlying_type (t);
2436 if (!t->byref && t->type == MONO_TYPE_R4) {
2437 MonoInst *iargs [1];
2441 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2443 /* The result will be in an int vreg */
2444 call->args [i] = conv;
2450 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (or LLVM) lay out the out-args for this call. */
2453 if (COMPILE_LLVM (cfg))
2454 mono_llvm_emit_call (cfg, call);
2456 mono_arch_emit_call (cfg, call);
2458 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2461 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2462 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Wire RGCTX_REG as the rgctx out-argument of CALL (passed in
 * MONO_ARCH_RGCTX_REG) and flag the cfg/call as rgctx-using.
 */
2468 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2470 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2471 cfg->uses_rgctx_reg = TRUE;
2472 call->rgctx_reg = TRUE;
2474 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG and arguments ARGS,
 * optionally passing IMT_ARG and RGCTX_ARG in their dedicated registers.
 * For pinvoke wrappers with calling-convention checking enabled, brackets
 * the call with SP capture/compare and raises ExecutionEngineException on a
 * stack imbalance. Returns the call instruction.
 */
2478 inline static MonoInst*
2479 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2484 gboolean check_sp = FALSE;
/* Only check SP for managed-to-native pinvoke wrappers, when enabled. */
2486 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2487 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2489 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Keep the rgctx value in a dedicated register across arg setup. */
2494 rgctx_reg = mono_alloc_preg (cfg);
2495 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Capture SP before the call so it can be compared afterwards. */
2499 if (!cfg->stack_inbalance_var)
2500 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2502 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2503 ins->dreg = cfg->stack_inbalance_var->dreg;
2504 MONO_ADD_INS (cfg->cbb, ins);
2507 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* Indirect call: sreg1 carries the target address. */
2509 call->inst.sreg1 = addr->dreg;
2512 emit_imt_argument (cfg, call, NULL, imt_arg);
2514 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* After the call: read SP again, restore it, and trap on imbalance. */
2519 sp_reg = mono_alloc_preg (cfg);
2521 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2523 MONO_ADD_INS (cfg->cbb, ins);
2525 /* Restore the stack so we don't crash when throwing the exception */
2526 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2527 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2528 MONO_ADD_INS (cfg->cbb, ins);
2530 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2531 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2535 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2537 return (MonoInst*)call;
2541 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2544 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2547 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2548 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2550 #ifndef DISABLE_REMOTING
2551 gboolean might_be_remote = FALSE;
2553 gboolean virtual_ = this_ins != NULL;
2554 gboolean enable_for_aot = TRUE;
2557 MonoInst *call_target = NULL;
2559 gboolean need_unbox_trampoline;
2562 sig = mono_method_signature (method);
2564 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2565 g_assert_not_reached ();
2568 rgctx_reg = mono_alloc_preg (cfg);
2569 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2572 if (method->string_ctor) {
2573 /* Create the real signature */
2574 /* FIXME: Cache these */
2575 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2576 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2581 context_used = mini_method_check_context_used (cfg, method);
2583 #ifndef DISABLE_REMOTING
2584 might_be_remote = this_ins && sig->hasthis &&
2585 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2586 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2588 if (might_be_remote && context_used) {
2591 g_assert (cfg->gshared);
2593 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2595 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2599 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2600 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2602 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2604 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2606 #ifndef DISABLE_REMOTING
2607 if (might_be_remote)
2608 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2611 call->method = method;
2612 call->inst.flags |= MONO_INST_HAS_METHOD;
2613 call->inst.inst_left = this_ins;
2614 call->tail_call = tail;
2617 int vtable_reg, slot_reg, this_reg;
2620 this_reg = this_ins->dreg;
2622 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2623 MonoInst *dummy_use;
2625 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2627 /* Make a call to delegate->invoke_impl */
2628 call->inst.inst_basereg = this_reg;
2629 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2630 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2632 /* We must emit a dummy use here because the delegate trampoline will
2633 replace the 'this' argument with the delegate target making this activation
2634 no longer a root for the delegate.
2635 This is an issue for delegates that target collectible code such as dynamic
2636 methods of GC'able assemblies.
2638 For a test case look into #667921.
2640 FIXME: a dummy use is not the best way to do it as the local register allocator
2641 will put it on a caller save register and spil it around the call.
2642 Ideally, we would either put it on a callee save register or only do the store part.
2644 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2646 return (MonoInst*)call;
2649 if ((!cfg->compile_aot || enable_for_aot) &&
2650 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2651 (MONO_METHOD_IS_FINAL (method) &&
2652 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2653 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2655 * the method is not virtual, we just need to ensure this is not null
2656 * and then we can call the method directly.
2658 #ifndef DISABLE_REMOTING
2659 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2661 * The check above ensures method is not gshared, this is needed since
2662 * gshared methods can't have wrappers.
2664 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2668 if (!method->string_ctor)
2669 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2671 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2672 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2674 * the method is virtual, but we can statically dispatch since either
2675 * it's class or the method itself are sealed.
2676 * But first we need to ensure it's not a null reference.
2678 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2680 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2681 } else if (call_target) {
2682 vtable_reg = alloc_preg (cfg);
2683 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2685 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2686 call->inst.sreg1 = call_target->dreg;
2687 call->inst.flags &= !MONO_INST_HAS_METHOD;
2689 vtable_reg = alloc_preg (cfg);
2690 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2691 if (mono_class_is_interface (method->klass)) {
2692 guint32 imt_slot = mono_method_get_imt_slot (method);
2693 emit_imt_argument (cfg, call, call->method, imt_arg);
2694 slot_reg = vtable_reg;
2695 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2697 slot_reg = vtable_reg;
2698 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2699 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2701 g_assert (mono_method_signature (method)->generic_param_count);
2702 emit_imt_argument (cfg, call, call->method, imt_arg);
2706 call->inst.sreg1 = slot_reg;
2707 call->inst.inst_offset = offset;
2708 call->is_virtual = TRUE;
2712 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2715 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2717 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a plain (non-virtual, non-tail) call to METHOD
 * with arguments ARGS and receiver THIS_INS, using the method's own signature
 * and no imt/rgctx argument.
 */
2721 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2723 	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a call to the native function FUNC with signature SIG.
 * The call instruction is created with no virtual/tail/rgctx handling and
 * appended to the current basic block.
 * NOTE(review): the parameter list and parts of the body are elided in this
 * excerpt; FUNC is presumably stored into the call before it is added.
 */
2727 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2734 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2737 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2739 	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its C function pointer FUNC.
 * The icall info is looked up by address, and the call goes through the
 * icall's wrapper (which provides the LMF frame / exception handling).
 */
2743 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2745 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2749 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2753  * mono_emit_abs_call:
2755  *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
2757 inline static MonoInst*
2758 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2759 					MonoMethodSignature *sig, MonoInst **args)
2761 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2765 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	/* Register the patch info so the ji pointer can later be recognized as a
	 * patch rather than a real address (identity hash/equality is intended). */
2768 	if (cfg->abs_patches == NULL)
2769 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
2770 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2771 	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* Mark the call so later passes know its fptr is a MonoJumpInfo*, not code. */
2772 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG extended with one extra trailing pointer-sized
 * parameter, used to pass an rgctx/extra argument on indirect calls.
 * NOTE(review): the result is g_malloc ()-ed and apparently never freed
 * (see the FIXME below) — confirm ownership before changing this.
 */
2776 static MonoMethodSignature*
2777 sig_to_rgctx_sig (MonoMethodSignature *sig)
2779 	// FIXME: memory allocation
2780 	MonoMethodSignature *res;
2783 	res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2784 	memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2785 	res->param_count = sig->param_count + 1;
2786 	for (i = 0; i < sig->param_count; ++i)
2787 		res->params [i] = sig->params [i];
	/* The extra arg is typed as a native int passed by ref-like this_arg. */
2788 	res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2792 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *
 *   Emit an indirect call through CALL_TARGET with the arguments in
 * ORIG_ARGS plus one extra argument taken from ARG_REG. The argument
 * array is rebuilt (stack buffer for small counts, mempool otherwise)
 * and the signature is extended via sig_to_rgctx_sig ().
 */
2794 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2796 	MonoMethodSignature *csig;
2797 	MonoInst *args_buf [16];
2799 	int i, pindex, tmp_reg;
2801 	/* Make a call with an rgctx/extra arg */
2802 	if (fsig->param_count + 2 < 16)
2805 		args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
	/* Copy 'this' (when present) and the declared parameters. */
2808 		args [pindex ++] = orig_args [0];
2809 	for (i = 0; i < fsig->param_count; ++i)
2810 		args [pindex ++] = orig_args [fsig->hasthis + i];
	/* Append the extra argument as the last parameter. */
2811 	tmp_reg = alloc_preg (cfg);
2812 	EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2813 	csig = sig_to_rgctx_sig (fsig);
2814 	return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2817 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *
 *   llvm-only mode: ADDR is a two-word function descriptor <addr, arg>.
 * Load both words and call addr, passing arg as the extra trailing argument.
 */
2819 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2821 	int addr_reg, arg_reg;
2822 	MonoInst *call_target;
2824 	g_assert (cfg->llvm_only);
2827 	 * addr points to a <addr, arg> pair, load both of them, and
2828 	 * make a call to addr, passing arg as an extra arg.
2830 	addr_reg = alloc_preg (cfg);
2831 	EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2832 	arg_reg = alloc_preg (cfg);
2833 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2835 	return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (no wrapper). Disabled for
 * mixed LLVM mode (non-32-bit address limitation on amd64), when single
 * stepping/seq points are needed, or when explicitly turned off.
 * NOTE(review): the return statements are elided in this excerpt.
 */
2839 direct_icalls_enabled (MonoCompile *cfg)
2843 	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
2845 	if (cfg->compile_llvm && !cfg->llvm_only)
2848 	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, the (lazily created, cached) managed wrapper
 * is inlined instead of called, avoiding the wrapper call overhead; otherwise
 * fall back to a normal native call through the icall wrapper.
 */
2854 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2857 	 * Call the jit icall without a wrapper if possible.
2858 	 * The wrapper is needed for the following reasons:
2859 	 * - to handle exceptions thrown using mono_raise_exceptions () from the
2860 	 *   icall function. The EH code needs the lmf frame pushed by the
2861 	 *   wrapper to be able to unwind back to managed code.
2862 	 * - to be able to do stack walks for asynchronously suspended
2863 	 *   threads when debugging.
2865 	if (info->no_raise && direct_icalls_enabled (cfg)) {
	/* Lazily create and cache the wrapper method on the icall info. */
2869 		if (!info->wrapper_method) {
2870 			name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2871 			info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
	/* Publish wrapper_method before other threads can observe it. */
2873 			mono_memory_barrier ();
2877 		 * Inline the wrapper method, which is basically a call to the C icall, and
2878 		 * an exception check.
2880 		costs = inline_method (cfg, info->wrapper_method, NULL,
2881 							   args, NULL, il_offset, TRUE);
2882 		g_assert (costs > 0);
2883 		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2887 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when needed:
 * pinvoke (and LLVM) callees may return sub-register-sized integers
 * without defined upper bits, so emit an explicit sign/zero extension
 * matching the declared return type. Returns the (possibly new) result
 * instruction; lines returning it are elided in this excerpt.
 */
2892 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2894 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2895 		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2899 			 * Native code might return non register sized integers
2900 			 * without initializing the upper bits.
	/* Pick the widening conversion from the load opcode for the ret type. */
2902 			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2903 			case OP_LOADI1_MEMBASE:
2904 				widen_op = OP_ICONV_TO_I1;
2906 			case OP_LOADU1_MEMBASE:
2907 				widen_op = OP_ICONV_TO_U1;
2909 			case OP_LOADI2_MEMBASE:
2910 				widen_op = OP_ICONV_TO_I2;
2912 			case OP_LOADU2_MEMBASE:
2913 				widen_op = OP_ICONV_TO_U2;
2919 			if (widen_op != -1) {
2920 				int dreg = alloc_preg (cfg);
2923 				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2924 				widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR that throws a MethodAccessException at runtime, passing the
 * CALLER and CALLEE methods (loaded via rgctx so generic sharing works)
 * to the mono_throw_method_access icall.
 */
2935 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2937 	MonoInst *args [16];
2939 	args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2940 	args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2942 	mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 *   Return the managed corlib String.memcpy (3-arg) helper, cached in a
 * static. Aborts with g_error () if corlib is too old to provide it.
 */
2946 get_memcpy_method (void)
2948 	static MonoMethod *memcpy_method = NULL;
2949 	if (!memcpy_method) {
2950 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2952 			g_error ("Old corlib found. Install a new one");
2954 	return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting at
 * byte OFFSET) that holds a GC reference, recursing into embedded value
 * types with references. Static fields are skipped. Used by the unrolled
 * wb-aware memcpy to know which stores need write barriers.
 */
2958 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2960 	MonoClassField *field;
2961 	gpointer iter = NULL;
2963 	while ((field = mono_class_get_fields (klass, &iter))) {
2966 		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
	/* For valuetypes, field offsets include the (absent) MonoObject header. */
2968 		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2969 		if (mini_type_is_reference (mono_field_get_type (field))) {
2970 			g_assert ((foffset % SIZEOF_VOID_P) == 0);
2971 			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2973 			MonoClass *field_class = mono_class_from_mono_type (field->type);
2974 			if (field_class->has_references)
2975 				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.
 * Three strategies, in order of preference:
 *   1. a dedicated backend opcode (OP_CARD_TABLE_WBARRIER) when available,
 *   2. inline card-table marking (shift, mask, store 1 into the card byte),
 *   3. a call to the generic managed write barrier method.
 * No-op when write barriers are not being generated for this method.
 */
2981 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2983 	int card_table_shift_bits;
2984 	gpointer card_table_mask;
2986 	MonoInst *dummy_use;
2987 	int nursery_shift_bits;
2988 	size_t nursery_size;
2990 	if (!cfg->gen_write_barriers)
2993 	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2995 	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2997 	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2998 		// MONO_ADD_INS (cfg->cbb, wbarrier) below; the opcode hides the barrier logic.
3000 		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3001 		wbarrier->sreg1 = ptr->dreg;
3002 		wbarrier->sreg2 = value->dreg;
3003 		MONO_ADD_INS (cfg->cbb, wbarrier);
3004 	} else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3005 		int offset_reg = alloc_preg (cfg);
	/* card index = ptr >> shift, optionally masked for wrapping card tables. */
3009 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3010 		if (card_table_mask)
3011 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3013 		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3014 		 * IMM's larger than 32bits.
3016 		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3017 		card_reg = ins->dreg;
3019 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
	/* Mark the card dirty. */
3020 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3022 		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3023 		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	/* Keep VALUE alive across the barrier so the GC still sees it as a root. */
3026 	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of a
 * KLASS value from iargs[1] to iargs[0]. Small copies (<= 5 pointer words)
 * are unrolled word by word with a barrier after each reference slot
 * (per the bitmap from create_write_barrier_bitmap ()); larger copies call
 * the mono_gc_wbarrier_value_copy_bitmap icall. Bails out (presumably
 * returning FALSE — return lines are elided here) when alignment is below
 * pointer size or SIZE exceeds the 32-word bitmap capacity.
 */
3030 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3032 	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3033 	unsigned need_wb = 0;
3038 	/*types with references can't have alignment smaller than sizeof(void*) */
3039 	if (align < SIZEOF_VOID_P)
3042 	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3043 	if (size > 32 * SIZEOF_VOID_P)
3046 	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3048 	/* We don't unroll more than 5 stores to avoid code bloat. */
3049 	if (size > 5 * SIZEOF_VOID_P) {
3050 		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
	/* Round size up to a whole number of pointer words. */
3051 		size += (SIZEOF_VOID_P - 1);
3052 		size &= ~(SIZEOF_VOID_P - 1);
3054 		EMIT_NEW_ICONST (cfg, iargs [2], size);
3055 		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3056 		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3060 	destreg = iargs [0]->dreg;
3061 	srcreg = iargs [1]->dreg;
3064 	dest_ptr_reg = alloc_preg (cfg);
3065 	tmp_reg = alloc_preg (cfg);
	/* Walk a separate dest pointer so destreg stays intact. */
3068 	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3070 	while (size >= SIZEOF_VOID_P) {
3071 		MonoInst *load_inst;
3072 		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3073 		load_inst->dreg = tmp_reg;
3074 		load_inst->inst_basereg = srcreg;
3075 		load_inst->inst_offset = offset;
3076 		MONO_ADD_INS (cfg->cbb, load_inst);
3078 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
	/* Barrier only for slots the bitmap marks as references (guard elided). */
3081 			emit_write_barrier (cfg, iargs [0], load_inst);
3083 		offset += SIZEOF_VOID_P;
3084 		size -= SIZEOF_VOID_P;
3087 		/*tmp += sizeof (void*)*/
3088 		if (size >= SIZEOF_VOID_P) {
3089 			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3090 			MONO_ADD_INS (cfg->cbb, iargs [0]);
3094 	/* Those cannot be references since size < sizeof (void*) */
	/* Copy the sub-pointer-sized tail in 4-, 2-, then 1-byte chunks. */
3096 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3097 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3103 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3104 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3110 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3111 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3120  * Emit code to copy a valuetype of type @klass whose address is stored in
3121  * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: gsharedvt classes fetch size/memcpy helpers from the rgctx;
 * types with references go through write-barrier-aware paths; otherwise a
 * small inline memcpy or a call to the managed memcpy helper is emitted.
 * When @native is TRUE the struct must contain no references.
 */
3124 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3126 	MonoInst *iargs [4];
3129 	MonoMethod *memcpy_method;
3130 	MonoInst *size_ins = NULL;
3131 	MonoInst *memcpy_ins = NULL;
3135 	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3138 	 * This check breaks with spilled vars... need to handle it during verification anyway.
3139 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
3142 	if (mini_is_gsharedvt_klass (klass)) {
	/* Size/memcpy are only known at runtime for gsharedvt types. */
3144 		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3145 		memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3149 		n = mono_class_native_size (klass, &align);
3151 		n = mono_class_value_size (klass, &align);
3153 	/* if native is true there should be no references in the struct */
3154 	if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3155 		/* Avoid barriers when storing to the stack */
3156 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3157 			  (dest->opcode == OP_LDADDR))) {
3163 			context_used = mini_class_check_context_used (cfg, klass);
3165 			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3166 			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3168 			} else if (context_used) {
3169 				iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3171 				iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
	/* Precompute the GC descriptor at JIT time when not AOT compiling. */
3172 				if (!cfg->compile_aot)
3173 					mono_class_compute_gc_descriptor (klass);
3177 				mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3179 				mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3184 	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3185 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
3186 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3191 		iargs [2] = size_ins;
3193 		EMIT_NEW_ICONST (cfg, iargs [2], n);
3195 	memcpy_method = get_memcpy_method ();
3197 		mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3199 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed corlib String.memset (3-arg) helper, cached in a
 * static. Aborts with g_error () if corlib is too old to provide it.
 */
3204 get_memset_method (void)
3206 	static MonoMethod *memset_method = NULL;
3207 	if (!memset_method) {
3208 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3210 			g_error ("Old corlib found. Install a new one");
3212 	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize a KLASS valuetype at the address in DEST.
 * gsharedvt types use a runtime size plus an indirect call to a bzero
 * helper fetched from the rgctx; small types are zeroed inline; everything
 * else calls the managed memset helper with value 0.
 */
3216 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3218 	MonoInst *iargs [3];
3221 	MonoMethod *memset_method;
3222 	MonoInst *size_ins = NULL;
3223 	MonoInst *bzero_ins = NULL;
3224 	static MonoMethod *bzero_method;
3226 	/* FIXME: Optimize this for the case when dest is an LDADDR */
3227 	mono_class_init (klass);
3228 	if (mini_is_gsharedvt_klass (klass)) {
3229 		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3230 		bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
	/* bzero_method is cached in a file-static; signature is (dest, size). */
3232 			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3233 		g_assert (bzero_method);
3235 		iargs [1] = size_ins;
3236 		mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3240 	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3242 	n = mono_class_value_size (klass, &align);
	/* Small values: zero inline instead of calling out. */
3244 	if (n <= sizeof (gpointer) * 8) {
3245 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3248 		memset_method = get_memset_method ();
3250 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
3251 		EMIT_NEW_ICONST (cfg, iargs [2], n);
3252 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
3259  * Emit IR to return either the this pointer for instance method,
3260  * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *
 *   Load the runtime generic context source for METHOD in a gshared method:
 * the mrgctx variable when the method context is used, the vtable variable
 * for static/valuetype methods (dereferencing the mrgctx to its class
 * vtable when needed), or the 'this' argument otherwise. Only valid when
 * cfg->gshared. Several return paths are elided in this excerpt.
 */
3263 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3265 	MonoInst *this_ins = NULL;
3267 	g_assert (cfg->gshared);
3269 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3270 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3271 			!method->klass->valuetype)
3272 		EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
3274 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3275 		MonoInst *mrgctx_loc, *mrgctx_var;
3277 		g_assert (!this_ins);
3278 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3280 		mrgctx_loc = mono_get_vtable_var (cfg);
3281 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3284 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3285 		MonoInst *vtable_loc, *vtable_var;
3287 		g_assert (!this_ins);
3289 		vtable_loc = mono_get_vtable_var (cfg);
3290 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3292 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
	/* The variable actually holds an mrgctx; load its class vtable field. */
3293 			MonoInst *mrgctx_var = vtable_var;
3296 			vtable_reg = alloc_preg (cfg);
3297 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3298 			vtable_var->type = STACK_PTR;
	/* Fallback: load the vtable out of the 'this' object. */
3306 		vtable_reg = alloc_preg (cfg);
3307 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) a MonoJumpInfoRgctxEntry describing an rgctx slot
 * lookup: for METHOD, whether the mrgctx is used, and the patch
 * (PATCH_TYPE/PATCH_DATA) plus INFO_TYPE identifying what to fetch.
 */
3312 static MonoJumpInfoRgctxEntry *
3313 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3315 	MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3316 	res->method = method;
3317 	res->in_mrgctx = in_mrgctx;
3318 	res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3319 	res->data->type = patch_type;
3320 	res->data->data.target = patch_data;
3321 	res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch rgctx ENTRY from RGCTX. The first branch
 * (presumably llvm-only — the guard is elided in this excerpt) simply calls
 * the fill icall with an AOT-const slot index. The general path emits the
 * fastpath by hand: walk the rgctx array chain to the slot's depth, null
 * checking each level and the slot itself, and fall back to the
 * mono_fill_{method,class}_rgctx icall in is_null_bb when anything is
 * missing. Both paths converge in end_bb with the result in res_reg.
 */
3326 static inline MonoInst*
3327 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3329 	MonoInst *args [16];
3332 	// FIXME: No fastpath since the slot is not a compile time constant
3334 	EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3335 	if (entry->in_mrgctx)
3336 		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3338 		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3342 	 * FIXME: This can be called during decompose, which is a problem since it creates
3344 	 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3346 	int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3348 	MonoBasicBlock *is_null_bb, *end_bb;
3349 	MonoInst *res, *ins, *call;
3352 	slot = mini_get_rgctx_entry_slot (entry);
3354 	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3355 	index = MONO_RGCTX_SLOT_INDEX (slot);
	/* mrgctx slots sit after the MonoMethodRuntimeGenericContext header. */
3357 		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	/* Find the array depth holding this index; sizes grow with depth. */
3358 	for (depth = 0; ; ++depth) {
3359 		int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3361 		if (index < size - 1)
3366 	NEW_BBLOCK (cfg, end_bb);
3367 	NEW_BBLOCK (cfg, is_null_bb);
3370 		rgctx_reg = rgctx->dreg;
3372 		rgctx_reg = alloc_preg (cfg);
3374 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3375 		// FIXME: Avoid this check by allocating the table when the vtable is created etc.
3376 		NEW_BBLOCK (cfg, is_null_bb);
3378 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3379 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3382 	for (i = 0; i < depth; ++i) {
3383 		int array_reg = alloc_preg (cfg);
3385 		/* load ptr to next array */
3386 		if (mrgctx && i == 0)
3387 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3389 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3390 		rgctx_reg = array_reg;
3391 		/* is the ptr null? */
3392 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3393 		/* if yes, jump to actual trampoline */
3394 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	/* Load the slot value; +1 skips the next-array link in slot 0. */
3398 	val_reg = alloc_preg (cfg);
3399 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3400 	/* is the slot null? */
3401 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3402 	/* if yes, jump to actual trampoline */
3403 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	/* Fastpath hit: move the value into the common result register. */
3406 	res_reg = alloc_preg (cfg);
3407 	MONO_INST_NEW (cfg, ins, OP_MOVE);
3408 	ins->dreg = res_reg;
3409 	ins->sreg1 = val_reg;
3410 	MONO_ADD_INS (cfg->cbb, ins);
3412 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	/* Slowpath: call the fill icall to lazily compute the slot. */
3415 	MONO_START_BB (cfg, is_null_bb);
3417 	EMIT_NEW_ICONST (cfg, args [1], index);
3419 		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3421 		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3422 	MONO_INST_NEW (cfg, ins, OP_MOVE);
3423 	ins->dreg = res_reg;
3424 	ins->sreg1 = call->dreg;
3425 	MONO_ADD_INS (cfg->cbb, ins);
3426 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3428 	MONO_START_BB (cfg, end_bb);
3437  * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * emit_rgctx_fetch:
 *
 *   Dispatch: inline fetch when allowed (condition elided in this excerpt),
 * otherwise an abs call to the lazy-fetch trampoline for ENTRY.
 */
3440 static inline MonoInst*
3441 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3444 		return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3446 		return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to fetch RGCTX_TYPE data for KLASS from the rgctx of the
 * current method.
 */
3450 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3451 						   MonoClass *klass, MonoRgctxInfoType rgctx_type)
3453 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3454 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3456 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch RGCTX_TYPE data for the signature SIG from the rgctx
 * of the current method.
 */
3460 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3461 					MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3463 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3464 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3466 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch RGCTX_TYPE data for a gsharedvt call described by
 * (SIG, CMETHOD) from the rgctx of the current method.
 */
3470 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3471 							   MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3473 	MonoJumpInfoGSharedVtCall *call_info;
3474 	MonoJumpInfoRgctxEntry *entry;
3477 	call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3478 	call_info->sig = sig;
3479 	call_info->method = cmethod;
3481 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3482 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3484 	return emit_rgctx_fetch (cfg, rgctx, entry);
3488  * emit_get_rgctx_virt_method:
3490  *   Return data for method VIRT_METHOD for a receiver of type KLASS.
3493 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3494 							MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3496 	MonoJumpInfoVirtMethod *info;
3497 	MonoJumpInfoRgctxEntry *entry;
	/* Package (klass, virt_method) as the patch data for the rgctx lookup. */
3500 	info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3501 	info->klass = klass;
3502 	info->method = virt_method;
3504 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3505 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3507 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt info structure for CMETHOD from the
 * rgctx of the current method.
 */
3511 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3512 								 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3514 	MonoJumpInfoRgctxEntry *entry;
3517 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3518 	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3520 	return emit_rgctx_fetch (cfg, rgctx, entry);
3524  * emit_get_rgctx_method:
3526  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3527  *   normal constants, else emit a load from the rgctx.
3530 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3531 					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3533 	if (!context_used) {
	/* No generic sharing in play: the method is a compile-time constant. */
3536 		switch (rgctx_type) {
3537 		case MONO_RGCTX_INFO_METHOD:
3538 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3540 		case MONO_RGCTX_INFO_METHOD_RGCTX:
3541 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3544 			g_assert_not_reached ();
3547 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3548 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3550 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch RGCTX_TYPE data for FIELD from the rgctx of the
 * current method.
 */
3555 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3556 					  MonoClassField *field, MonoRgctxInfoType rgctx_type)
3558 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3559 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3561 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (DATA, RGCTX_TYPE) in the current
 * method's gsharedvt info template, reusing an existing matching entry
 * when possible (except LOCAL_OFFSET entries, which are never shared)
 * and growing the mempool-backed entries array on demand.
 */
3565 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3567 	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3568 	MonoRuntimeGenericContextInfoTemplate *template_;
	/* Reuse an identical entry when present. */
3573 	for (i = 0; i < info->num_entries; ++i) {
3574 		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3576 		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
	/* Grow the array geometrically (16 entries minimum). */
3580 	if (info->num_entries == info->count_entries) {
3581 		MonoRuntimeGenericContextInfoTemplate *new_entries;
3582 		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3584 		new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3586 		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3587 		info->entries = new_entries;
3588 		info->count_entries = new_count_entries;
3591 	idx = info->num_entries;
3592 	template_ = &info->entries [idx];
3593 	template_->info_type = rgctx_type;
3594 	template_->data = data;
3596 	info->num_entries ++;
3602  * emit_get_gsharedvt_info:
3604  *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3607 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
	/* Resolve (data, rgctx_type) to a slot index, then load entries [idx]. */
3612 	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3613 	/* Load info->entries [idx] */
3614 	dreg = alloc_preg (cfg);
3615 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch gsharedvt info of RGCTX_TYPE keyed by the
 * byval type of KLASS.
 */
3621 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3623 	return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3627  * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR running the class (static) initializer of KLASS if it has not
 * run yet. The vtable is obtained via the rgctx under generic sharing or
 * as a constant otherwise; the check itself is either a dedicated backend
 * opcode or an inline initialized-flag test branching around a call to
 * the mono_generic_class_init icall.
 */
3630 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3632 	MonoInst *vtable_arg;
3635 	context_used = mini_class_check_context_used (cfg, klass);
3638 		vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3639 												klass, MONO_RGCTX_INFO_VTABLE);
3641 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3645 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3648 	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3652 		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3653 		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3655 		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3656 		ins->sreg1 = vtable_arg->dreg;
3657 		MONO_ADD_INS (cfg->cbb, ins);
	/* Inline check: skip the icall when vtable->initialized is set. */
3660 		MonoBasicBlock *inited_bb;
3661 		MonoInst *args [16];
3663 		inited_reg = alloc_ireg (cfg);
3665 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3667 		NEW_BBLOCK (cfg, inited_bb);
3669 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3670 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3672 		args [0] = vtable_arg;
3673 		mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3675 		MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP for METHOD, but only
 * when seq points are enabled and METHOD is the method being compiled
 * (i.e. not while inlining). NONEMPTY_STACK marks points where the
 * evaluation stack is not empty.
 */
3680 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3684 	if (cfg->gen_seq_points && cfg->method == method) {
3685 		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3687 			ins->flags |= MONO_INST_NONEMPTY_STACK;
3688 		MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When the --debug=casts option is active, emit IR that records the
 * source class (from the object's vtable) and the target KLASS into the
 * thread's MonoJitTlsData before a cast, so a failing cast can produce a
 * detailed error message. NULL_CHECK guards the vtable load behind an
 * is-null branch. No-op otherwise.
 */
3693 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3695 	if (mini_get_debug_options ()->better_cast_details) {
3696 		int vtable_reg = alloc_preg (cfg);
3697 		int klass_reg = alloc_preg (cfg);
3698 		MonoBasicBlock *is_null_bb = NULL;
3700 		int to_klass_reg, context_used;
3703 			NEW_BBLOCK (cfg, is_null_bb);
3705 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3706 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3709 		tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3711 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
	/* class_cast_from = obj->vtable->klass */
3715 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3716 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3718 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
	/* class_cast_to = KLASS, via rgctx under generic sharing. */
3720 		context_used = mini_class_check_context_used (cfg, klass);
3722 			MonoInst *class_ins;
3724 			class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3725 			to_klass_reg = class_ins->dreg;
3727 			to_klass_reg = alloc_preg (cfg);
3728 			MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3730 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3733 			MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Clear the cast-details slot saved by mini_save_cast_details () once the
 * cast has succeeded, so stale data is not reported on a later failure.
 */
3738 mini_reset_cast_details (MonoCompile *cfg)
3740 /* Reset the variables holding the cast details */
3741 if (mini_get_debug_options ()->better_cast_details) {
3742 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3743 /* It is enough to reset the from field */
3744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ's exact type is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException on mismatch (used by stelem-style stores).
 * Three strategies depending on the compilation mode:
 *  - MONO_OPT_SHARED: compare obj->vtable->klass against a runtime class constant;
 *  - generic sharing: compare obj->vtable against a vtable fetched from the rgctx;
 *  - otherwise: compare obj->vtable against a vtable constant (AOT uses a
 *    patched constant, JIT embeds the pointer as an immediate).
 */
3749 * On return the caller must check @array_class for load errors
3752 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3754 int vtable_reg = alloc_preg (cfg);
3757 context_used = mini_class_check_context_used (cfg, array_class);
3759 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also serves as the null check on obj. */
3761 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3763 if (cfg->opt & MONO_OPT_SHARED) {
3764 int class_reg = alloc_preg (cfg);
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3768 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3769 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3770 } else if (context_used) {
3771 MonoInst *vtable_ins;
3773 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3774 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3776 if (cfg->compile_aot) {
3780 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3782 vt_reg = alloc_preg (cfg);
3783 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3784 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3787 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3793 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3795 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for VAL. In shared code the
 * method address is obtained from the rgctx and the call is made
 * indirectly; otherwise a direct call (possibly passing the vtable as an
 * extra argument) is emitted. Returns the call instruction.
 */
3799 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3800 * generic code is generated.
3803 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3805 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3808 MonoInst *rgctx, *addr;
3810 /* FIXME: What if the class is shared? We might not
3811 have to get the address of the method from the
3813 addr = emit_get_rgctx_method (cfg, context_used, method,
3814 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3815 if (cfg->llvm_only) {
/* llvm-only mode uses calli through a function pointer; record the signature. */
3816 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3817 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3819 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3821 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3824 gboolean pass_vtable, pass_mrgctx;
3825 MonoInst *rgctx_arg = NULL;
3827 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3828 g_assert (!pass_mrgctx);
3831 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3834 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3837 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IR for unboxing the object on top of the stack (sp [0]) to
 * KLASS: a type check (rank must be 0 and the vtable's element class must
 * match), followed by computing the address of the value payload, which
 * starts right after the MonoObject header.
 */
3842 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3846 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3847 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3848 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3849 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3851 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3852 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3855 /* FIXME: generics */
3856 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type. */
3859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3860 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3863 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3866 MonoInst *element_class;
3868 /* This assertion is from the unboxcast insn */
3869 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3871 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3872 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3874 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3875 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3877 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3878 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3879 mini_reset_cast_details (cfg);
/* The unboxed value lives immediately after the object header. */
3882 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3883 MONO_ADD_INS (cfg->cbb, add);
3884 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ to a gsharedvt KLASS whose instantiation kind (ref type,
 * value type, or Nullable<T>) is only known at runtime. Branches on the
 * CLASS_BOX_TYPE info fetched from the rgctx and leaves the address of the
 * unboxed value in addr_reg, then loads the value from it:
 *  - vtype: address is obj + sizeof (MonoObject);
 *  - ref: the reference itself is spilled to a temporary and its address used;
 *  - nullable: call the class's unbox helper via an rgctx-supplied address.
 */
3891 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3893 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3894 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3898 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3904 args [1] = klass_inst;
/* Runtime type check which also handles the unbox cast semantics. */
3907 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3909 NEW_BBLOCK (cfg, is_ref_bb);
3910 NEW_BBLOCK (cfg, is_nullable_bb);
3911 NEW_BBLOCK (cfg, end_bb);
3912 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3914 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3919 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3920 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough case: instantiated as a vtype, payload follows the header. */
3924 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3925 MONO_ADD_INS (cfg->cbb, addr);
3927 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3930 MONO_START_BB (cfg, is_ref_bb);
3932 /* Save the ref to a temporary */
3933 dreg = alloc_ireg (cfg);
3934 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3935 addr->dreg = addr_reg;
3936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3937 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3940 MONO_START_BB (cfg, is_nullable_bb);
3943 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3944 MonoInst *unbox_call;
3945 MonoMethodSignature *unbox_sig;
/* Hand-build object -> T signature; T's exact layout is only known at runtime. */
3947 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3948 unbox_sig->ret = &klass->byval_arg;
3949 unbox_sig->param_count = 1;
3950 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3953 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3955 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3957 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3958 addr->dreg = addr_reg;
3961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3964 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value through the address in addr_reg. */
3967 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of KLASS (FOR_BOX distinguishes
 * box allocations so the right managed allocator variant is picked).
 * Chooses between:
 *  - the GC's managed allocator method (fast path, inlined allocation);
 *  - ves_icall_object_new (+ explicit domain arg) under MONO_OPT_SHARED;
 *  - a corlib-token helper for out-of-line AOT throw paths;
 *  - a class-specific allocation function otherwise.
 * Returns the instruction producing the object, or NULL with the cfg
 * exception set on error.
 */
3973 * Returns NULL and set the cfg exception on error.
3976 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3978 MonoInst *iargs [2];
/* Shared-code path: the vtable/class is fetched through the rgctx. */
3983 MonoRgctxInfoType rgctx_info;
3984 MonoInst *iargs [2];
3985 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3987 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3989 if (cfg->opt & MONO_OPT_SHARED)
3990 rgctx_info = MONO_RGCTX_INFO_KLASS;
3992 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3993 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3995 if (cfg->opt & MONO_OPT_SHARED) {
3996 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3998 alloc_ftn = ves_icall_object_new;
4001 alloc_ftn = ves_icall_object_new_specific;
4004 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4005 if (known_instance_size) {
4006 int size = mono_class_instance_size (klass);
/* Sanity check: every object must at least hold the MonoObject header. */
4007 if (size < sizeof (MonoObject))
4008 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4010 EMIT_NEW_ICONST (cfg, iargs [1], size);
4012 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4015 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path follows. */
4018 if (cfg->opt & MONO_OPT_SHARED) {
4019 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4020 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4022 alloc_ftn = ves_icall_object_new;
4023 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4024 /* This happens often in argument checking code, eg. throw new FooException... */
4025 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4026 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4027 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4029 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4030 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type load error through the cfg. */
4034 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4035 cfg->exception_ptr = klass;
4039 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4041 if (managed_alloc) {
4042 int size = mono_class_instance_size (klass);
4043 if (size < sizeof (MonoObject))
4044 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4046 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4047 EMIT_NEW_ICONST (cfg, iargs [1], size);
4048 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4050 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words (pass_lw). */
4052 guint32 lw = vtable->klass->instance_size;
4053 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4054 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4055 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4058 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4062 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the IR to box VAL into an object of KLASS. Special cases:
 *  - Nullable<T>: call Nullable<T>.Box () (via rgctx in shared code);
 *  - gsharedvt classes: runtime branch on CLASS_BOX_TYPE — allocate+copy
 *    for vtypes, pass the reference through unchanged for ref types, and
 *    call the nullable box helper for Nullable<T>;
 *  - ordinary vtypes: allocate and store the value after the header.
 * Returns NULL with the cfg exception set on error.
 */
4066 * Returns NULL and set the cfg exception on error.
4069 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4071 MonoInst *alloc, *ins;
4073 if (mono_class_is_nullable (klass)) {
4074 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4077 if (cfg->llvm_only && cfg->gsharedvt) {
4078 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4079 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4080 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4082 /* FIXME: What if the class is shared? We might not
4083 have to get the method address from the RGCTX. */
4084 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4085 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4086 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4088 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4091 gboolean pass_vtable, pass_mrgctx;
4092 MonoInst *rgctx_arg = NULL;
4094 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4095 g_assert (!pass_mrgctx);
4098 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4101 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4104 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4108 if (mini_is_gsharedvt_klass (klass)) {
4109 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4110 MonoInst *res, *is_ref, *src_var, *addr;
4113 dreg = alloc_ireg (cfg);
4115 NEW_BBLOCK (cfg, is_ref_bb);
4116 NEW_BBLOCK (cfg, is_nullable_bb);
4117 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime instantiation kind of klass. */
4118 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the value after the object header. */
4126 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4129 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4130 ins->opcode = OP_STOREV_MEMBASE;
4132 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4133 res->type = STACK_OBJ;
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_ref_bb);
4140 /* val is a vtype, so has to load the value manually */
4141 src_var = get_vreg_to_inst (cfg, val->dreg);
4143 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4144 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4149 MONO_START_BB (cfg, is_nullable_bb);
4152 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4153 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4155 MonoMethodSignature *box_sig;
4158 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4159 * construct that method at JIT time, so have to do things by hand.
4161 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4162 box_sig->ret = &mono_defaults.object_class->byval_arg;
4163 box_sig->param_count = 1;
4164 box_sig->params [0] = &klass->byval_arg;
4167 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4169 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4170 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4171 res->type = STACK_OBJ;
4175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4177 MONO_START_BB (cfg, end_bb);
/* Non-gsharedvt vtype: plain allocate + store. */
4181 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4185 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are safe to call directly. */
4190 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) may be invoked directly instead of
 * through a wrapper. Only whitelisted corlib classes qualify, since a
 * direct call must never raise a managed exception from native code.
 */
4193 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4195 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4196 if (!direct_icalls_enabled (cfg))
4200 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4201 * Whitelist a few icalls for now.
4203 if (!direct_icall_type_hash) {
4204 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4206 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4207 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4208 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4209 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4210 mono_memory_barrier ();
4211 direct_icall_type_hash = h;
4214 if (cmethod->klass == mono_defaults.math_class)
4216 /* No locking needed */
4217 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD inspects its caller's stack frame at runtime
 * (e.g. System.Type:GetType ()), which rules out optimizations that would
 * change the visible call stack, such as inlining or tail calls.
 */
4223 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4225 if (cmethod->klass == mono_defaults.systemtype_class) {
4226 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag () for KLASS:
 * (*enum_this & enum_flag) == enum_flag, as either 32-bit or 64-bit
 * integer ops depending on the enum's underlying type, producing an I4
 * boolean. The emitted ops are decomposed for backends that need it.
 */
4232 static G_GNUC_UNUSED MonoInst*
4233 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4235 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4236 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Decide 32-bit vs 64-bit handling from the underlying integral type. */
4239 switch (enum_type->type) {
4242 #if SIZEOF_REGISTER == 8
4254 MonoInst *load, *and_, *cmp, *ceq;
4255 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4256 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4257 int dest_reg = alloc_ireg (cfg);
4259 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4260 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4261 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4262 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4264 ceq->type = STACK_I4;
/* NOTE(review): decomposition appears conditional in the elided context — confirm guard. */
4267 load = mono_decompose_opcode (cfg, load);
4268 and_ = mono_decompose_opcode (cfg, and_);
4269 cmp = mono_decompose_opcode (cfg, cmp);
4270 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined version of the delegate constructor for a delegate of
 * KLASS over METHOD (VIRTUAL_ for ldvirtftn-style delegates): allocate the
 * delegate object and fill in its target, method, cached code slot and
 * invoke_impl/trampoline fields, mirroring mono_delegate_ctor (). Returns
 * the delegate object instruction, or NULL with the cfg exception set.
 */
4278 * Returns NULL and set the cfg exception on error.
4280 static G_GNUC_UNUSED MonoInst*
4281 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4285 gpointer trampoline;
4286 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out when no virtual invoke impl exists for this signature. */
4290 if (virtual_ && !cfg->llvm_only) {
4291 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4294 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4298 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4302 /* Inline the contents of mono_delegate_ctor */
4304 /* Set target field */
4305 /* Optimize away setting of NULL target */
4306 if (!MONO_INS_IS_PCONST_NULL (target)) {
4307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store is a ref store into a heap object: needs a write barrier. */
4308 if (cfg->gen_write_barriers) {
4309 dreg = alloc_preg (cfg);
4310 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4311 emit_write_barrier (cfg, ptr, target);
4315 /* Set method field */
4316 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4320 * To avoid looking up the compiled code belonging to the target method
4321 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4322 * store it, and we fill it after the method has been compiled.
4324 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4325 MonoInst *code_slot_ins;
4328 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared JIT: look up / create the per-domain code slot under the domain lock. */
4330 domain = mono_domain_get ();
4331 mono_domain_lock (domain);
4332 if (!domain_jit_info (domain)->method_code_hash)
4333 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4334 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4336 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4337 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4339 mono_domain_unlock (domain);
4341 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4343 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: delegate wiring is done by a runtime icall instead of trampolines. */
4346 if (cfg->llvm_only) {
4347 MonoInst *args [16];
4352 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4353 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4356 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4362 if (cfg->compile_aot) {
4363 MonoDelegateClassMethodPair *del_tramp;
4365 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4366 del_tramp->klass = klass;
4367 del_tramp->method = context_used ? NULL : method;
4368 del_tramp->is_virtual = virtual_;
4369 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4372 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4374 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4375 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4378 /* Set invoke_impl field */
4380 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Otherwise tramp_ins points at a MonoDelegateTrampInfo: copy its fields. */
4382 dreg = alloc_preg (cfg);
4383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4384 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4386 dreg = alloc_preg (cfg);
4387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4388 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4391 dreg = alloc_preg (cfg);
4392 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4395 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK via the
 * vararg mono_array_new_va () icall, passing the dimension/bound arguments
 * from SP. Vararg calls disable LLVM compilation for this method.
 */
4401 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4403 MonoJitICallInfo *info;
4405 /* Need to register the icall so it gets an icall wrapper */
4406 info = mono_get_array_new_va_icall (rank);
4408 cfg->flags |= MONO_CFG_HAS_VARARGS;
4410 /* mono_array_new_va () needs a vararg calling convention */
4411 cfg->exception_message = g_strdup ("array-new");
4412 cfg->disable_llvm = TRUE;
4414 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4415 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4419 * handle_constrained_gsharedvt_call:
4421 * Handle constrained calls where the receiver is a gsharedvt type.
4422 * Return the instruction representing the call. Set the cfg exception on failure.
4425 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4426 gboolean *ref_emit_widen)
4428 MonoInst *ins = NULL;
4429 gboolean emit_widen = *ref_emit_widen;
4432 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4433 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4434 * pack the arguments into an array, and do the rest of the work in in an icall.
/*
 * Only a restricted shape of call is supported: at most one non-this
 * argument, and a void/primitive/reference/struct/enum/gsharedvt return.
 * Anything else falls through to GSHAREDVT_FAILURE below.
 */
4436 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4437 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4438 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4439 MonoInst *args [16];
4442 * This case handles calls to
4443 * - object:ToString()/Equals()/GetHashCode(),
4444 * - System.IComparable<T>:CompareTo()
4445 * - System.IEquatable<T>:Equals ()
4446 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method, args [2] = constrained class (both may be rgctx-resolved). */
4450 if (mono_method_check_context_used (cmethod))
4451 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4453 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4454 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4456 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4457 if (fsig->hasthis && fsig->param_count) {
4458 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4459 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4460 ins->dreg = alloc_preg (cfg);
4461 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4462 MONO_ADD_INS (cfg->cbb, ins);
4465 if (mini_is_gsharedvt_type (fsig->params [0])) {
4466 int addr_reg, deref_arg_reg;
4468 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4469 deref_arg_reg = alloc_preg (cfg);
4470 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4471 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4473 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4474 addr_reg = ins->dreg;
4475 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4477 EMIT_NEW_ICONST (cfg, args [3], 0);
4478 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4481 EMIT_NEW_ICONST (cfg, args [3], 0);
4482 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall performs the ref/vtype dispatch and the actual invocation. */
4484 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/load it to the declared return type. */
4487 if (mini_is_gsharedvt_type (fsig->ret)) {
4488 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4489 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4493 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4494 MONO_ADD_INS (cfg->cbb, add);
4496 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4497 MONO_ADD_INS (cfg->cbb, ins);
4498 /* ins represents the call result */
4501 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4504 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block (once per method), and add a dummy use in the exit
 * block so liveness keeps the variable alive for the whole method.
 */
4513 mono_emit_load_got_addr (MonoCompile *cfg)
4515 MonoInst *getaddr, *dummy_use;
4517 if (!cfg->got_var || cfg->got_var_allocated)
4520 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4521 getaddr->cil_code = cfg->header->code;
4522 getaddr->dreg = cfg->got_var->dreg;
4524 /* Add it to the start of the first bblock */
4525 if (cfg->bb_entry->code) {
4526 getaddr->next = cfg->bb_entry->code;
4527 cfg->bb_entry->code = getaddr;
4530 MONO_ADD_INS (cfg->bb_entry, getaddr);
4532 cfg->got_var_allocated = TRUE;
4535 * Add a dummy use to keep the got_var alive, since real uses might
4536 * only be generated by the back ends.
4537 * Add it to end_bblock, so the variable's lifetime covers the whole
4539 * It would be better to make the usage of the got var explicit in all
4540 * cases when the backend needs it (i.e. calls, throw etc.), so this
4541 * wouldn't be needed.
4543 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4544 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline IL-size limit, initialized lazily from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT. */
4547 static int inline_limit;
4548 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Return whether METHOD may be inlined into the method currently being
 * compiled. Rejects: explicit noinlining/synchronized methods, MBR
 * classes, bodies over the size limit (unless AggressiveInlining),
 * classes whose cctor cannot be run or proven-run at compile time, R4
 * signatures under soft-float, and methods on cfg->dont_inline.
 */
4551 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4553 MonoMethodHeaderSummary header;
4555 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4556 MonoMethodSignature *sig = mono_method_signature (method);
4560 if (cfg->disable_inline)
/* Cap recursion depth of nested inlining. */
4565 if (cfg->inline_depth > 10)
4568 if (!mono_method_get_header_summary (method, &header))
4571 /*runtime, icall and pinvoke are checked by summary call*/
4572 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4573 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4574 (mono_class_is_marshalbyref (method->klass)) ||
4578 /* also consider num_locals? */
4579 /* Do the size check early to avoid creating vtables */
4580 if (!inline_limit_inited) {
4581 if (g_getenv ("MONO_INLINELIMIT"))
4582 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4584 inline_limit = INLINE_LENGTH_LIMIT;
4585 inline_limit_inited = TRUE;
4587 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4591 * if we can initialize the class of the method right away, we do,
4592 * otherwise we don't allow inlining if the class needs initialization,
4593 * since it would mean inserting a call to mono_runtime_class_init()
4594 * inside the inlined code
4596 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4599 if (!(cfg->opt & MONO_OPT_SHARED)) {
4600 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4601 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4602 if (method->klass->has_cctor) {
4603 vtable = mono_class_vtable (cfg->domain, method->klass);
4606 if (!cfg->compile_aot) {
4608 if (!mono_runtime_class_init_full (vtable, &error)) {
4609 mono_error_cleanup (&error);
4614 } else if (mono_class_is_before_field_init (method->klass)) {
4615 if (cfg->run_cctors && method->klass->has_cctor) {
4616 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4617 if (!method->klass->runtime_info)
4618 /* No vtable created yet */
4620 vtable = mono_class_vtable (cfg->domain, method->klass);
4623 /* This makes so that inline cannot trigger */
4624 /* .cctors: too many apps depend on them */
4625 /* running with a specific order... */
4626 if (! vtable->initialized)
4629 if (!mono_runtime_class_init_full (vtable, &error)) {
4630 mono_error_cleanup (&error);
4634 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4635 if (!method->klass->runtime_info)
4636 /* No vtable created yet */
4638 vtable = mono_class_vtable (cfg->domain, method->klass);
4641 if (!vtable->initialized)
4646 * If we're compiling for shared code
4647 * the cctor will need to be run at aot method load time, for example,
4648 * or at the end of the compilation of the inlining method.
4650 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4654 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 args/returns would need decomposition, so don't inline. */
4655 if (mono_arch_is_soft_float ()) {
4657 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4659 for (i = 0; i < sig->param_count; ++i)
4660 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4665 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a (static) field access on KLASS from METHOD requires
 * emitting a class-init check: not needed if the vtable is already
 * initialized (JIT only), if the class has BeforeFieldInit semantics
 * relative to the current method, or if the access is from a non-static
 * method of the class itself (the cctor has run before the call).
 */
4672 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4674 if (!cfg->compile_aot) {
4676 if (vtable->initialized)
4680 if (mono_class_is_before_field_init (klass)) {
4681 if (cfg->method == method)
4685 if (!mono_class_needs_cctor_run (klass, method))
4688 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4689 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of the single-dimensional array ARR
 * with element class KLASS: &arr->vector + index * element_size. BCHECK
 * controls the bounds check against max_length. On 64-bit targets the
 * 32-bit index is sign-extended first. For gsharedvt variable-size
 * elements the size is fetched from the rgctx; on x86/amd64, power-of-two
 * sizes use a single LEA.
 */
4696 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4700 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4703 if (mini_is_gsharedvt_variable_klass (klass)) {
4706 mono_class_init (klass);
4707 size = mono_class_array_element_size (klass);
4710 mult_reg = alloc_preg (cfg);
4711 array_reg = arr->dreg;
4712 index_reg = index->dreg;
4714 #if SIZEOF_REGISTER == 8
4715 /* The array reg is 64 bits but the index reg is only 32 */
4716 if (COMPILE_LLVM (cfg)) {
4718 index2_reg = index_reg;
4720 index2_reg = alloc_preg (cfg);
4721 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
4724 if (index->type == STACK_I8) {
4725 index2_reg = alloc_preg (cfg);
4726 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4728 index2_reg = index_reg;
4733 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4735 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes fold into one LEA with a shift. */
4736 if (size == 1 || size == 2 || size == 4 || size == 8) {
4737 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4739 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4740 ins->klass = mono_class_get_element_class (klass);
4741 ins->type = STACK_MP;
4747 add_reg = alloc_ireg_mp (cfg);
4750 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime, multiply by it. */
4753 g_assert (cfg->gshared);
4754 context_used = mini_class_check_context_used (cfg, klass);
4755 g_assert (context_used);
4756 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4757 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4761 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4762 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4763 ins->klass = mono_class_get_element_class (klass);
4764 ins->type = STACK_MP;
4765 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (INDEX_INS1, INDEX_INS2) of a
 * two-dimensional array of element type KLASS.  Per-dimension lower bounds
 * are subtracted and each adjusted index is range-checked against the
 * dimension length (unsigned compare), throwing IndexOutOfRangeException.
 * Returns the address instruction (stack type STACK_MP).
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx1 = index1 - lower_bound; check realidx1 < length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: same, reading the second MonoArrayBounds entry. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * length2) + realidx2) * size + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the array accessor CMETHOD
 * (Get/Set/Address).  IS_SET excludes the trailing value argument when
 * computing the rank.  Rank 1 uses the inline fast path, rank 2 the
 * OP_LMUL-based intrinsic when available; otherwise a call to the
 * marshaled Address helper is emitted.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
	MonoMethod *addr_method;
	MonoClass *eclass = cmethod->klass->element_class;

	/* For setters the last parameter is the value, not an index. */
	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);

	/* emit_ldelema_2 depends on OP_LMUL */
	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);

	/* General case: call the runtime-generated Address wrapper. */
	if (mini_is_gsharedvt_variable_klass (eclass))
	element_size = mono_class_array_element_size (eclass);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see mono_set_break_policy). */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
	return MONO_BREAK_POLICY_ALWAYS;
/* The currently active break policy callback; defaults to always inserting breakpoints. */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;

 * mono_set_break_policy:
 * policy_callback: the new callback function
 *
 * Allow embedders to decide whether to actually obey breakpoint instructions
 * (both break IL instructions and Debugger.Break () method calls), for example
 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * untrusted or semi-trusted code.
 *
 * @policy_callback will be called every time a break point instruction needs to
 * be inserted with the method argument being the method that calls Debugger.Break()
 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * if it wants the breakpoint to not be effective in the given method.
 * #MONO_BREAK_POLICY_ALWAYS is the default.
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
	if (policy_callback)
		break_policy_func = policy_callback;
	/* Passing NULL restores the default always-insert policy. */
		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy (break_policy_func) to decide
 * whether a breakpoint in METHOD should be honored.  (Name misspelling is
 * historical; renaming would churn all call sites.)
 */
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
	case MONO_BREAK_POLICY_NEVER:
	case MONO_BREAK_POLICY_ON_DBG:
		g_warning ("mdb no longer supported");
		/* Callback returned a value outside the MonoBreakPolicy enum. */
		g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = byref value location.
 * IS_SET selects store-into-array vs load-from-array.  fsig->params [2]
 * supplies the element type.
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		/* SetGenericValueImpl: copy *args [2] into the element slot. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		/* Reference elements must notify the GC about the new pointer. */
		if (mini_type_is_reference (&eklass->byval_arg))
			emit_write_barrier (cfg, addr, load);
		/* GetGenericValueImpl: copy the element into *args [2]. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type (by-value form), resolving generic sharing. */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into element sp [1] of array sp [0] with
 * element type KLASS.  SAFETY_CHECKS controls bounds/covariance checking:
 * for reference-type elements with a possibly non-null value, array
 * covariance requires a virtual stelemref helper call.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
		/* Covariant store: dispatch through the object[] stelemref helper. */
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		if (sp [0]->type != STACK_OBJ)
		if (sp [2]->type != STACK_OBJ)

		return mono_emit_method_call (cfg, helper, iargs, sp [0]);

	if (mini_is_gsharedvt_variable_klass (klass)) {
		/* Variable-size element: store through a computed address. */
		// FIXME-VT: OP_ICONST optimization
		addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		ins->opcode = OP_STOREV_MEMBASE;
	} else if (sp [1]->opcode == OP_ICONST) {
		/* Constant index: fold the element offset at compile time. */
		int array_reg = sp [0]->dreg;
		int index_reg = sp [1]->dreg;
		int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

		if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);

			MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		/* General case: compute the address, then store through it. */
		MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		if (generic_class_is_reference_type (cfg, klass))
			emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Implement the Array.UnsafeStore/UnsafeLoad intrinsics: element access
 * without the usual safety checks.  IS_SET selects store vs load; the
 * element type comes from fsig->params [2] (store) or fsig->ret (load).
 */
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
		eklass = mono_class_from_mono_type (fsig->params [2]);
		eklass = mono_class_from_mono_type (fsig->ret);

		/* Store path reuses emit_array_store with safety checks disabled. */
		return emit_array_store (cfg, eklass, args, FALSE);
		/* Load path: address without bounds check, then a typed load. */
		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5023 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5026 int param_size, return_size;
5028 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5029 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5031 if (cfg->verbose_level > 3)
5032 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5034 //Don't allow mixing reference types with value types
5035 if (param_klass->valuetype != return_klass->valuetype) {
5036 if (cfg->verbose_level > 3)
5037 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5041 if (!param_klass->valuetype) {
5042 if (cfg->verbose_level > 3)
5043 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5048 if (param_klass->has_references || return_klass->has_references)
5051 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5052 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5053 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5054 if (cfg->verbose_level > 3)
5055 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5059 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5060 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5061 if (cfg->verbose_level > 3)
5062 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5066 param_size = mono_class_value_size (param_klass, &align);
5067 return_size = mono_class_value_size (return_klass, &align);
5069 //We can do it if sizes match
5070 if (param_size == return_size) {
5071 if (cfg->verbose_level > 3)
5072 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5076 //No simple way to handle struct if sizes don't match
5077 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5078 if (cfg->verbose_level > 3)
5079 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5084 * Same reg size category.
5085 * A quick note on why we don't require widening here.
5086 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5088 * Since the source value comes from a function argument, the JIT will already have
5089 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5091 if (param_size <= 4 && return_size <= 4) {
5092 if (cfg->verbose_level > 3)
5093 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Implement the Array.UnsafeMov<S,R> intrinsic when S and R (or their
 * element types, for rank-1 arrays) are move-compatible per
 * is_unsafe_mov_compatible.  gsharedvt return types are excluded.
 */
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);

	if (mini_is_gsharedvt_variable_type (fsig->ret))

	//Valuetypes that are semantically equivalent or numbers that can be widened to
	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))

	//Arrays of valuetypes that are semantically equivalent
	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic IR sequence:
 * SIMD ctors first (when MONO_OPT_SIMD is enabled), then the native
 * numeric-types intrinsics.  Returns NULL when no intrinsic applies.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
#ifdef MONO_ARCH_SIMD_INTRINSICS
	MonoInst *ins = NULL;

	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);

	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction to the current basic block.
 * KIND selects the MONO_MEMORY_BARRIER_* semantics (callers in this file
 * pass MONO_MEMORY_BARRIER_SEQ).
 */
emit_memory_barrier (MonoCompile *cfg, int kind)
	MonoInst *ins = NULL;
	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
	MONO_ADD_INS (cfg->cbb, ins);
	/* The barrier kind travels in the backend-specific field. */
	ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic lowering used when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs(double) become single float opcodes, and
 * integer Min/Max become CMOV-style IMIN/IMAX (and unsigned/long variants)
 * when MONO_OPT_CMOV is enabled.  Returns NULL when nothing matches.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* The LLVM backend supports these intrinsics */
	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
		} else if (strcmp (cmethod->name, "Cos") == 0) {
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {

		/* Unary double intrinsic: one R8 in, one R8 out. */
		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, ins->type);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;

		/* Binary min/max intrinsic: stack type follows the operand width. */
		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_dreg (cfg, ins->type);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use in generic-sharing-capable methods:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.  Returns NULL when
 * CMETHOD is not one of them.
 */
mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	if (cmethod->klass == mono_defaults.array_class) {
		if (strcmp (cmethod->name, "UnsafeStore") == 0)
			return emit_array_unsafe_access (cfg, fsig, args, TRUE);
		else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
			return emit_array_unsafe_access (cfg, fsig, args, FALSE);
		else if (strcmp (cmethod->name, "UnsafeMov") == 0)
			return emit_array_unsafe_mov (cfg, fsig, args);
5225 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5227 MonoInst *ins = NULL;
5228 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5230 if (cmethod->klass == mono_defaults.string_class) {
5231 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5232 int dreg = alloc_ireg (cfg);
5233 int index_reg = alloc_preg (cfg);
5234 int add_reg = alloc_preg (cfg);
5236 #if SIZEOF_REGISTER == 8
5237 if (COMPILE_LLVM (cfg)) {
5238 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5240 /* The array reg is 64 bits but the index reg is only 32 */
5241 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5244 index_reg = args [1]->dreg;
5246 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5248 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5249 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5250 add_reg = ins->dreg;
5251 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5254 int mult_reg = alloc_preg (cfg);
5255 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5256 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5257 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5258 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5260 type_from_op (cfg, ins, NULL, NULL);
5262 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5263 int dreg = alloc_ireg (cfg);
5264 /* Decompose later to allow more optimizations */
5265 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5266 ins->type = STACK_I4;
5267 ins->flags |= MONO_INST_FAULT;
5268 cfg->cbb->has_array_access = TRUE;
5269 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5274 } else if (cmethod->klass == mono_defaults.object_class) {
5275 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5276 int dreg = alloc_ireg_ref (cfg);
5277 int vt_reg = alloc_preg (cfg);
5278 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5279 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5280 type_from_op (cfg, ins, NULL, NULL);
5283 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5284 int dreg = alloc_ireg (cfg);
5285 int t1 = alloc_ireg (cfg);
5287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5288 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5289 ins->type = STACK_I4;
5292 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5293 MONO_INST_NEW (cfg, ins, OP_NOP);
5294 MONO_ADD_INS (cfg->cbb, ins);
5298 } else if (cmethod->klass == mono_defaults.array_class) {
5299 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5300 return emit_array_generic_access (cfg, fsig, args, FALSE);
5301 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5302 return emit_array_generic_access (cfg, fsig, args, TRUE);
5304 #ifndef MONO_BIG_ARRAYS
5306 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5309 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5310 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5311 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5312 int dreg = alloc_ireg (cfg);
5313 int bounds_reg = alloc_ireg_mp (cfg);
5314 MonoBasicBlock *end_bb, *szarray_bb;
5315 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5317 NEW_BBLOCK (cfg, end_bb);
5318 NEW_BBLOCK (cfg, szarray_bb);
5320 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5321 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5323 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5324 /* Non-szarray case */
5326 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5327 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5329 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5330 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5332 MONO_START_BB (cfg, szarray_bb);
5335 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5336 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5338 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5339 MONO_START_BB (cfg, end_bb);
5341 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5342 ins->type = STACK_I4;
5348 if (cmethod->name [0] != 'g')
5351 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5352 int dreg = alloc_ireg (cfg);
5353 int vtable_reg = alloc_preg (cfg);
5354 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5355 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5357 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5358 type_from_op (cfg, ins, NULL, NULL);
5361 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5362 int dreg = alloc_ireg (cfg);
5364 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5365 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5366 type_from_op (cfg, ins, NULL, NULL);
5371 } else if (cmethod->klass == runtime_helpers_class) {
5372 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5373 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5375 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5376 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5378 g_assert (ctx->method_inst);
5379 g_assert (ctx->method_inst->type_argc == 1);
5380 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5381 MonoClass *klass = mono_class_from_mono_type (t);
5385 mono_class_init (klass);
5386 if (MONO_TYPE_IS_REFERENCE (t))
5387 EMIT_NEW_ICONST (cfg, ins, 1);
5388 else if (MONO_TYPE_IS_PRIMITIVE (t))
5389 EMIT_NEW_ICONST (cfg, ins, 0);
5390 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5391 EMIT_NEW_ICONST (cfg, ins, 1);
5392 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5393 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5395 g_assert (cfg->gshared);
5397 int context_used = mini_class_check_context_used (cfg, klass);
5399 /* This returns 1 or 2 */
5400 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5401 int dreg = alloc_ireg (cfg);
5402 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5408 } else if (cmethod->klass == mono_defaults.monitor_class) {
5409 gboolean is_enter = FALSE;
5410 gboolean is_v4 = FALSE;
5412 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5416 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5421 * To make async stack traces work, icalls which can block should have a wrapper.
5422 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5424 MonoBasicBlock *end_bb;
5426 NEW_BBLOCK (cfg, end_bb);
5428 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5430 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5431 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5432 MONO_START_BB (cfg, end_bb);
5435 } else if (cmethod->klass == mono_defaults.thread_class) {
5436 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5437 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5438 MONO_ADD_INS (cfg->cbb, ins);
5440 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5441 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5442 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5444 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5446 if (fsig->params [0]->type == MONO_TYPE_I1)
5447 opcode = OP_LOADI1_MEMBASE;
5448 else if (fsig->params [0]->type == MONO_TYPE_U1)
5449 opcode = OP_LOADU1_MEMBASE;
5450 else if (fsig->params [0]->type == MONO_TYPE_I2)
5451 opcode = OP_LOADI2_MEMBASE;
5452 else if (fsig->params [0]->type == MONO_TYPE_U2)
5453 opcode = OP_LOADU2_MEMBASE;
5454 else if (fsig->params [0]->type == MONO_TYPE_I4)
5455 opcode = OP_LOADI4_MEMBASE;
5456 else if (fsig->params [0]->type == MONO_TYPE_U4)
5457 opcode = OP_LOADU4_MEMBASE;
5458 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5459 opcode = OP_LOADI8_MEMBASE;
5460 else if (fsig->params [0]->type == MONO_TYPE_R4)
5461 opcode = OP_LOADR4_MEMBASE;
5462 else if (fsig->params [0]->type == MONO_TYPE_R8)
5463 opcode = OP_LOADR8_MEMBASE;
5464 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5465 opcode = OP_LOAD_MEMBASE;
5468 MONO_INST_NEW (cfg, ins, opcode);
5469 ins->inst_basereg = args [0]->dreg;
5470 ins->inst_offset = 0;
5471 MONO_ADD_INS (cfg->cbb, ins);
5473 switch (fsig->params [0]->type) {
5480 ins->dreg = mono_alloc_ireg (cfg);
5481 ins->type = STACK_I4;
5485 ins->dreg = mono_alloc_lreg (cfg);
5486 ins->type = STACK_I8;
5490 ins->dreg = mono_alloc_ireg (cfg);
5491 #if SIZEOF_REGISTER == 8
5492 ins->type = STACK_I8;
5494 ins->type = STACK_I4;
5499 ins->dreg = mono_alloc_freg (cfg);
5500 ins->type = STACK_R8;
5503 g_assert (mini_type_is_reference (fsig->params [0]));
5504 ins->dreg = mono_alloc_ireg_ref (cfg);
5505 ins->type = STACK_OBJ;
5509 if (opcode == OP_LOADI8_MEMBASE)
5510 ins = mono_decompose_opcode (cfg, ins);
5512 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5516 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5518 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5520 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5521 opcode = OP_STOREI1_MEMBASE_REG;
5522 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5523 opcode = OP_STOREI2_MEMBASE_REG;
5524 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5525 opcode = OP_STOREI4_MEMBASE_REG;
5526 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5527 opcode = OP_STOREI8_MEMBASE_REG;
5528 else if (fsig->params [0]->type == MONO_TYPE_R4)
5529 opcode = OP_STORER4_MEMBASE_REG;
5530 else if (fsig->params [0]->type == MONO_TYPE_R8)
5531 opcode = OP_STORER8_MEMBASE_REG;
5532 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5533 opcode = OP_STORE_MEMBASE_REG;
5536 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5538 MONO_INST_NEW (cfg, ins, opcode);
5539 ins->sreg1 = args [1]->dreg;
5540 ins->inst_destbasereg = args [0]->dreg;
5541 ins->inst_offset = 0;
5542 MONO_ADD_INS (cfg->cbb, ins);
5544 if (opcode == OP_STOREI8_MEMBASE_REG)
5545 ins = mono_decompose_opcode (cfg, ins);
5550 } else if (cmethod->klass->image == mono_defaults.corlib &&
5551 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5552 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5555 #if SIZEOF_REGISTER == 8
5556 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5557 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5558 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5559 ins->dreg = mono_alloc_preg (cfg);
5560 ins->sreg1 = args [0]->dreg;
5561 ins->type = STACK_I8;
5562 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5563 MONO_ADD_INS (cfg->cbb, ins);
5567 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5569 /* 64 bit reads are already atomic */
5570 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5571 load_ins->dreg = mono_alloc_preg (cfg);
5572 load_ins->inst_basereg = args [0]->dreg;
5573 load_ins->inst_offset = 0;
5574 load_ins->type = STACK_I8;
5575 MONO_ADD_INS (cfg->cbb, load_ins);
5577 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5584 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5585 MonoInst *ins_iconst;
5588 if (fsig->params [0]->type == MONO_TYPE_I4) {
5589 opcode = OP_ATOMIC_ADD_I4;
5590 cfg->has_atomic_add_i4 = TRUE;
5592 #if SIZEOF_REGISTER == 8
5593 else if (fsig->params [0]->type == MONO_TYPE_I8)
5594 opcode = OP_ATOMIC_ADD_I8;
5597 if (!mono_arch_opcode_supported (opcode))
5599 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5600 ins_iconst->inst_c0 = 1;
5601 ins_iconst->dreg = mono_alloc_ireg (cfg);
5602 MONO_ADD_INS (cfg->cbb, ins_iconst);
5604 MONO_INST_NEW (cfg, ins, opcode);
5605 ins->dreg = mono_alloc_ireg (cfg);
5606 ins->inst_basereg = args [0]->dreg;
5607 ins->inst_offset = 0;
5608 ins->sreg2 = ins_iconst->dreg;
5609 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5610 MONO_ADD_INS (cfg->cbb, ins);
5612 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5613 MonoInst *ins_iconst;
5616 if (fsig->params [0]->type == MONO_TYPE_I4) {
5617 opcode = OP_ATOMIC_ADD_I4;
5618 cfg->has_atomic_add_i4 = TRUE;
5620 #if SIZEOF_REGISTER == 8
5621 else if (fsig->params [0]->type == MONO_TYPE_I8)
5622 opcode = OP_ATOMIC_ADD_I8;
5625 if (!mono_arch_opcode_supported (opcode))
5627 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5628 ins_iconst->inst_c0 = -1;
5629 ins_iconst->dreg = mono_alloc_ireg (cfg);
5630 MONO_ADD_INS (cfg->cbb, ins_iconst);
5632 MONO_INST_NEW (cfg, ins, opcode);
5633 ins->dreg = mono_alloc_ireg (cfg);
5634 ins->inst_basereg = args [0]->dreg;
5635 ins->inst_offset = 0;
5636 ins->sreg2 = ins_iconst->dreg;
5637 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5638 MONO_ADD_INS (cfg->cbb, ins);
5640 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5643 if (fsig->params [0]->type == MONO_TYPE_I4) {
5644 opcode = OP_ATOMIC_ADD_I4;
5645 cfg->has_atomic_add_i4 = TRUE;
5647 #if SIZEOF_REGISTER == 8
5648 else if (fsig->params [0]->type == MONO_TYPE_I8)
5649 opcode = OP_ATOMIC_ADD_I8;
5652 if (!mono_arch_opcode_supported (opcode))
5654 MONO_INST_NEW (cfg, ins, opcode);
5655 ins->dreg = mono_alloc_ireg (cfg);
5656 ins->inst_basereg = args [0]->dreg;
5657 ins->inst_offset = 0;
5658 ins->sreg2 = args [1]->dreg;
5659 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5660 MONO_ADD_INS (cfg->cbb, ins);
5663 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5664 MonoInst *f2i = NULL, *i2f;
5665 guint32 opcode, f2i_opcode, i2f_opcode;
5666 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5667 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5669 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5670 fsig->params [0]->type == MONO_TYPE_R4) {
5671 opcode = OP_ATOMIC_EXCHANGE_I4;
5672 f2i_opcode = OP_MOVE_F_TO_I4;
5673 i2f_opcode = OP_MOVE_I4_TO_F;
5674 cfg->has_atomic_exchange_i4 = TRUE;
5676 #if SIZEOF_REGISTER == 8
5678 fsig->params [0]->type == MONO_TYPE_I8 ||
5679 fsig->params [0]->type == MONO_TYPE_R8 ||
5680 fsig->params [0]->type == MONO_TYPE_I) {
5681 opcode = OP_ATOMIC_EXCHANGE_I8;
5682 f2i_opcode = OP_MOVE_F_TO_I8;
5683 i2f_opcode = OP_MOVE_I8_TO_F;
5686 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5687 opcode = OP_ATOMIC_EXCHANGE_I4;
5688 cfg->has_atomic_exchange_i4 = TRUE;
5694 if (!mono_arch_opcode_supported (opcode))
5698 /* TODO: Decompose these opcodes instead of bailing here. */
5699 if (COMPILE_SOFT_FLOAT (cfg))
5702 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5703 f2i->dreg = mono_alloc_ireg (cfg);
5704 f2i->sreg1 = args [1]->dreg;
5705 if (f2i_opcode == OP_MOVE_F_TO_I4)
5706 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5707 MONO_ADD_INS (cfg->cbb, f2i);
5710 MONO_INST_NEW (cfg, ins, opcode);
5711 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5712 ins->inst_basereg = args [0]->dreg;
5713 ins->inst_offset = 0;
5714 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5715 MONO_ADD_INS (cfg->cbb, ins);
5717 switch (fsig->params [0]->type) {
5719 ins->type = STACK_I4;
5722 ins->type = STACK_I8;
5725 #if SIZEOF_REGISTER == 8
5726 ins->type = STACK_I8;
5728 ins->type = STACK_I4;
5733 ins->type = STACK_R8;
5736 g_assert (mini_type_is_reference (fsig->params [0]));
5737 ins->type = STACK_OBJ;
5742 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5743 i2f->dreg = mono_alloc_freg (cfg);
5744 i2f->sreg1 = ins->dreg;
5745 i2f->type = STACK_R8;
5746 if (i2f_opcode == OP_MOVE_I4_TO_F)
5747 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5748 MONO_ADD_INS (cfg->cbb, i2f);
5753 if (cfg->gen_write_barriers && is_ref)
5754 emit_write_barrier (cfg, args [0], args [1]);
5756 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5757 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5758 guint32 opcode, f2i_opcode, i2f_opcode;
5759 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5760 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5762 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5763 fsig->params [1]->type == MONO_TYPE_R4) {
5764 opcode = OP_ATOMIC_CAS_I4;
5765 f2i_opcode = OP_MOVE_F_TO_I4;
5766 i2f_opcode = OP_MOVE_I4_TO_F;
5767 cfg->has_atomic_cas_i4 = TRUE;
5769 #if SIZEOF_REGISTER == 8
5771 fsig->params [1]->type == MONO_TYPE_I8 ||
5772 fsig->params [1]->type == MONO_TYPE_R8 ||
5773 fsig->params [1]->type == MONO_TYPE_I) {
5774 opcode = OP_ATOMIC_CAS_I8;
5775 f2i_opcode = OP_MOVE_F_TO_I8;
5776 i2f_opcode = OP_MOVE_I8_TO_F;
5779 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5780 opcode = OP_ATOMIC_CAS_I4;
5781 cfg->has_atomic_cas_i4 = TRUE;
5787 if (!mono_arch_opcode_supported (opcode))
5791 /* TODO: Decompose these opcodes instead of bailing here. */
5792 if (COMPILE_SOFT_FLOAT (cfg))
5795 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5796 f2i_new->dreg = mono_alloc_ireg (cfg);
5797 f2i_new->sreg1 = args [1]->dreg;
5798 if (f2i_opcode == OP_MOVE_F_TO_I4)
5799 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5800 MONO_ADD_INS (cfg->cbb, f2i_new);
5802 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5803 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5804 f2i_cmp->sreg1 = args [2]->dreg;
5805 if (f2i_opcode == OP_MOVE_F_TO_I4)
5806 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5807 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5810 MONO_INST_NEW (cfg, ins, opcode);
5811 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5812 ins->sreg1 = args [0]->dreg;
5813 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5814 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5815 MONO_ADD_INS (cfg->cbb, ins);
5817 switch (fsig->params [1]->type) {
5819 ins->type = STACK_I4;
5822 ins->type = STACK_I8;
5825 #if SIZEOF_REGISTER == 8
5826 ins->type = STACK_I8;
5828 ins->type = STACK_I4;
5832 ins->type = cfg->r4_stack_type;
5835 ins->type = STACK_R8;
5838 g_assert (mini_type_is_reference (fsig->params [1]));
5839 ins->type = STACK_OBJ;
5844 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5845 i2f->dreg = mono_alloc_freg (cfg);
5846 i2f->sreg1 = ins->dreg;
5847 i2f->type = STACK_R8;
5848 if (i2f_opcode == OP_MOVE_I4_TO_F)
5849 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5850 MONO_ADD_INS (cfg->cbb, i2f);
5855 if (cfg->gen_write_barriers && is_ref)
5856 emit_write_barrier (cfg, args [0], args [1]);
5858 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5859 fsig->params [1]->type == MONO_TYPE_I4) {
5860 MonoInst *cmp, *ceq;
5862 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5865 /* int32 r = CAS (location, value, comparand); */
5866 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5867 ins->dreg = alloc_ireg (cfg);
5868 ins->sreg1 = args [0]->dreg;
5869 ins->sreg2 = args [1]->dreg;
5870 ins->sreg3 = args [2]->dreg;
5871 ins->type = STACK_I4;
5872 MONO_ADD_INS (cfg->cbb, ins);
5874 /* bool result = r == comparand; */
5875 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5876 cmp->sreg1 = ins->dreg;
5877 cmp->sreg2 = args [2]->dreg;
5878 cmp->type = STACK_I4;
5879 MONO_ADD_INS (cfg->cbb, cmp);
5881 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5882 ceq->dreg = alloc_ireg (cfg);
5883 ceq->type = STACK_I4;
5884 MONO_ADD_INS (cfg->cbb, ceq);
5886 /* *success = result; */
5887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5889 cfg->has_atomic_cas_i4 = TRUE;
5891 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5892 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5896 } else if (cmethod->klass->image == mono_defaults.corlib &&
5897 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5898 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5901 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5903 MonoType *t = fsig->params [0];
5905 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5907 g_assert (t->byref);
5908 /* t is a byref type, so the reference check is more complicated */
5909 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5910 if (t->type == MONO_TYPE_I1)
5911 opcode = OP_ATOMIC_LOAD_I1;
5912 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5913 opcode = OP_ATOMIC_LOAD_U1;
5914 else if (t->type == MONO_TYPE_I2)
5915 opcode = OP_ATOMIC_LOAD_I2;
5916 else if (t->type == MONO_TYPE_U2)
5917 opcode = OP_ATOMIC_LOAD_U2;
5918 else if (t->type == MONO_TYPE_I4)
5919 opcode = OP_ATOMIC_LOAD_I4;
5920 else if (t->type == MONO_TYPE_U4)
5921 opcode = OP_ATOMIC_LOAD_U4;
5922 else if (t->type == MONO_TYPE_R4)
5923 opcode = OP_ATOMIC_LOAD_R4;
5924 else if (t->type == MONO_TYPE_R8)
5925 opcode = OP_ATOMIC_LOAD_R8;
5926 #if SIZEOF_REGISTER == 8
5927 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5928 opcode = OP_ATOMIC_LOAD_I8;
5929 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5930 opcode = OP_ATOMIC_LOAD_U8;
5932 else if (t->type == MONO_TYPE_I)
5933 opcode = OP_ATOMIC_LOAD_I4;
5934 else if (is_ref || t->type == MONO_TYPE_U)
5935 opcode = OP_ATOMIC_LOAD_U4;
5939 if (!mono_arch_opcode_supported (opcode))
5942 MONO_INST_NEW (cfg, ins, opcode);
5943 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5944 ins->sreg1 = args [0]->dreg;
5945 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5946 MONO_ADD_INS (cfg->cbb, ins);
5949 case MONO_TYPE_BOOLEAN:
5956 ins->type = STACK_I4;
5960 ins->type = STACK_I8;
5964 #if SIZEOF_REGISTER == 8
5965 ins->type = STACK_I8;
5967 ins->type = STACK_I4;
5971 ins->type = cfg->r4_stack_type;
5974 ins->type = STACK_R8;
5978 ins->type = STACK_OBJ;
5984 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5986 MonoType *t = fsig->params [0];
5989 g_assert (t->byref);
5990 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5991 if (t->type == MONO_TYPE_I1)
5992 opcode = OP_ATOMIC_STORE_I1;
5993 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5994 opcode = OP_ATOMIC_STORE_U1;
5995 else if (t->type == MONO_TYPE_I2)
5996 opcode = OP_ATOMIC_STORE_I2;
5997 else if (t->type == MONO_TYPE_U2)
5998 opcode = OP_ATOMIC_STORE_U2;
5999 else if (t->type == MONO_TYPE_I4)
6000 opcode = OP_ATOMIC_STORE_I4;
6001 else if (t->type == MONO_TYPE_U4)
6002 opcode = OP_ATOMIC_STORE_U4;
6003 else if (t->type == MONO_TYPE_R4)
6004 opcode = OP_ATOMIC_STORE_R4;
6005 else if (t->type == MONO_TYPE_R8)
6006 opcode = OP_ATOMIC_STORE_R8;
6007 #if SIZEOF_REGISTER == 8
6008 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6009 opcode = OP_ATOMIC_STORE_I8;
6010 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6011 opcode = OP_ATOMIC_STORE_U8;
6013 else if (t->type == MONO_TYPE_I)
6014 opcode = OP_ATOMIC_STORE_I4;
6015 else if (is_ref || t->type == MONO_TYPE_U)
6016 opcode = OP_ATOMIC_STORE_U4;
6020 if (!mono_arch_opcode_supported (opcode))
6023 MONO_INST_NEW (cfg, ins, opcode);
6024 ins->dreg = args [0]->dreg;
6025 ins->sreg1 = args [1]->dreg;
6026 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6027 MONO_ADD_INS (cfg->cbb, ins);
6029 if (cfg->gen_write_barriers && is_ref)
6030 emit_write_barrier (cfg, args [0], args [1]);
6036 } else if (cmethod->klass->image == mono_defaults.corlib &&
6037 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6038 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6039 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6040 if (should_insert_brekpoint (cfg->method)) {
6041 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6043 MONO_INST_NEW (cfg, ins, OP_NOP);
6044 MONO_ADD_INS (cfg->cbb, ins);
6048 } else if (cmethod->klass->image == mono_defaults.corlib &&
6049 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6050 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6051 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6053 EMIT_NEW_ICONST (cfg, ins, 1);
6055 EMIT_NEW_ICONST (cfg, ins, 0);
6058 } else if (cmethod->klass->image == mono_defaults.corlib &&
6059 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6060 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6061 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6062 /* No stack walks are currently available, so implement this as an intrinsic */
6063 MonoInst *assembly_ins;
6065 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6066 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6069 } else if (cmethod->klass->image == mono_defaults.corlib &&
6070 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6071 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6072 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6073 /* No stack walks are currently available, so implement this as an intrinsic */
6074 MonoInst *method_ins;
6075 MonoMethod *declaring = cfg->method;
6077 /* This returns the declaring generic method */
6078 if (declaring->is_inflated)
6079 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6080 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6081 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6082 cfg->no_inline = TRUE;
6083 if (cfg->method != cfg->current_method)
6084 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6087 } else if (cmethod->klass == mono_defaults.math_class) {
6089 * There is general branchless code for Min/Max, but it does not work for
6091 * http://everything2.com/?node_id=1051618
6093 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6094 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6095 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6096 ins->dreg = alloc_preg (cfg);
6097 ins->type = STACK_I4;
6098 MONO_ADD_INS (cfg->cbb, ins);
6100 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6101 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6102 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6103 !strcmp (cmethod->klass->name, "Selector")) ||
6104 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6105 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6106 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6107 !strcmp (cmethod->klass->name, "Selector"))
6109 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6110 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6111 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6114 MonoJumpInfoToken *ji;
6117 if (args [0]->opcode == OP_GOT_ENTRY) {
6118 pi = (MonoInst *)args [0]->inst_p1;
6119 g_assert (pi->opcode == OP_PATCH_INFO);
6120 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6121 ji = (MonoJumpInfoToken *)pi->inst_p0;
6123 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6124 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6127 NULLIFY_INS (args [0]);
6129 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6130 return_val_if_nok (&cfg->error, NULL);
6132 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6133 ins->dreg = mono_alloc_ireg (cfg);
6136 MONO_ADD_INS (cfg->cbb, ins);
6141 #ifdef MONO_ARCH_SIMD_INTRINSICS
6142 if (cfg->opt & MONO_OPT_SIMD) {
6143 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6149 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6153 if (COMPILE_LLVM (cfg)) {
6154 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6159 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6163 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-internal replacement when one exists.
 * The only case visible here is System.String:InternalAllocateStr, which is
 * routed to the GC's managed allocator unless allocation profiling or
 * MONO_OPT_SHARED is active.
 * NOTE(review): this excerpt is non-contiguous; some original lines are elided.
 */
6166 inline static MonoInst*
6167 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6168 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6170 if (method->klass == mono_defaults.string_class) {
6171 /* managed string allocation support */
6172 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6173 MonoInst *iargs [2];
6174 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6175 MonoMethod *managed_alloc = NULL;
6177 g_assert (vtable); /* Should not fail since it is System.String */
6178 #ifndef MONO_CROSS_COMPILE
6179 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6183 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6184 iargs [1] = args [0];
6185 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   For every incoming argument of SIG (including an implicit 'this'),
 * create an OP_LOCAL variable in cfg->args [] and emit a store of the
 * corresponding stack value SP [i] into it. Used when setting up the
 * argument variables of an inlined method body.
 */
6192 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6194 MonoInst *store, *temp;
6197 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the 'this' slot the type comes from the stack value, not from SIG */
6198 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6201 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6202 * would be different than the MonoInst's used to represent arguments, and
6203 * the ldelema implementation can't deal with that.
6204 * Solution: When ldelema is used on an inline argument, create a var for
6205 * it, emit ldelema on that var, and emit the saving code below in
6206 * inline_method () if needed.
6208 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6209 cfg->args [i] = temp;
6210 /* This uses cfg->args [i] which is set by the preceding line */
6211 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6212 store->cil_code = sp [0]->cil_code;
6217 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6218 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6220 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining of callees whose full name starts with
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable. The limit
 * string is looked up once and cached in a function-local static.
 */
6222 check_inline_called_method_name_limit (MonoMethod *called_method)
6225 static const char *limit = NULL;
6227 if (limit == NULL) {
6228 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6230 if (limit_string != NULL)
6231 limit = limit_string;
6236 if (limit [0] != '\0') {
6237 char *called_method_name = mono_method_full_name (called_method, TRUE);
6239 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6240 g_free (called_method_name);
6242 //return (strncmp_result <= 0);
6243 return (strncmp_result == 0);
6250 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining when the CALLER's full name starts with
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable. Mirrors
 * check_inline_called_method_name_limit () for the caller side.
 */
6252 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6255 static const char *limit = NULL;
6257 if (limit == NULL) {
6258 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6259 if (limit_string != NULL) {
6260 limit = limit_string;
6266 if (limit [0] != '\0') {
6267 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6269 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6270 g_free (caller_method_name);
6272 //return (strncmp_result <= 0);
6273 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes vreg DREG to the zero/default value of RTYPE:
 * NULL for pointers/references, 0 for integers, 0.0 for floats, and a VZERO
 * for value types (including generic value-type instantiations and
 * value-type type variables).
 */
6281 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage so OP_R4CONST/OP_R8CONST can point at a stable 0 constant */
6283 static double r8_0 = 0.0;
6284 static float r4_0 = 0.0;
6288 rtype = mini_get_underlying_type (rtype);
6292 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6293 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6294 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6295 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6296 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6297 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6298 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6299 ins->type = STACK_R4;
6300 ins->inst_p0 = (void*)&r4_0;
6302 MONO_ADD_INS (cfg->cbb, ins);
6303 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6304 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6305 ins->type = STACK_R8;
6306 ins->inst_p0 = (void*)&r8_0;
6308 MONO_ADD_INS (cfg->cbb, ins);
6309 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6310 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6311 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6312 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6313 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized NULL */
6315 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* initializations which keep
 * the IR well-formed without generating real initialization code. Falls back
 * to emit_init_rvar () for types with no dummy opcode.
 */
6320 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6324 rtype = mini_get_underlying_type (rtype);
6328 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6329 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6330 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6331 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6332 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6333 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6334 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6335 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6336 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6337 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6338 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6339 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6340 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6341 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6343 emit_init_rvar (cfg, dreg, rtype);
6347 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE. Under soft-float a fresh dreg is
 * initialized and then stored into the local; otherwise the local's dreg is
 * initialized directly (for real, or with dummy opcodes when !init).
 */
6349 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6351 MonoInst *var = cfg->locals [local];
6352 if (COMPILE_SOFT_FLOAT (cfg)) {
6354 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6355 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cfg->cbb->last_ins) into the local */
6356 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6359 emit_init_rvar (cfg, var->dreg, type);
6361 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Exported thin wrapper over the file-local inline_method (). */
6366 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6368 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6374 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Compile CMETHOD's body directly into the current method by recursively
 * calling mono_method_to_ir () into freshly allocated start/end bblocks,
 * then splicing those bblocks into the caller on success or discarding them
 * on abort. The caller's per-method compilation state (locals, args, cil
 * offsets, current method, generic context, ...) is saved before and
 * restored after the recursive call.
 */
6377 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6378 guchar *ip, guint real_offset, gboolean inline_always)
6381 MonoInst *ins, *rvar = NULL;
6382 MonoMethodHeader *cheader;
6383 MonoBasicBlock *ebblock, *sbblock;
6385 MonoMethod *prev_inlined_method;
6386 MonoInst **prev_locals, **prev_args;
6387 MonoType **prev_arg_types;
6388 guint prev_real_offset;
6389 GHashTable *prev_cbb_hash;
6390 MonoBasicBlock **prev_cil_offset_to_bb;
6391 MonoBasicBlock *prev_cbb;
6392 const unsigned char *prev_ip;
6393 unsigned char *prev_cil_start;
6394 guint32 prev_cil_offset_to_bb_len;
6395 MonoMethod *prev_current_method;
6396 MonoGenericContext *prev_generic_context;
6397 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6399 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6401 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6402 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6405 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6406 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6411 fsig = mono_method_signature (cmethod);
6413 if (cfg->verbose_level > 2)
6414 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6416 if (!cmethod->inline_info) {
6417 cfg->stat_inlineable_methods++;
6418 cmethod->inline_info = 1;
6421 /* allocate local variables */
6422 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: fatal when inlining is mandatory, otherwise just give up */
6424 if (inline_always) {
6425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6426 mono_error_move (&cfg->error, &error);
6428 mono_error_cleanup (&error);
6433 /*Must verify before creating locals as it can cause the JIT to assert.*/
6434 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6435 mono_metadata_free_mh (cheader);
6439 /* allocate space to store the return value */
6440 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6441 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6444 prev_locals = cfg->locals;
6445 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6446 for (i = 0; i < cheader->num_locals; ++i)
6447 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6449 /* allocate start and end blocks */
6450 /* This is needed so if the inline is aborted, we can clean up */
6451 NEW_BBLOCK (cfg, sbblock);
6452 sbblock->real_offset = real_offset;
6454 NEW_BBLOCK (cfg, ebblock);
6455 ebblock->block_num = cfg->num_bblocks++;
6456 ebblock->real_offset = real_offset;
/* Save the caller's compilation state before recursing */
6458 prev_args = cfg->args;
6459 prev_arg_types = cfg->arg_types;
6460 prev_inlined_method = cfg->inlined_method;
6461 cfg->inlined_method = cmethod;
6462 cfg->ret_var_set = FALSE;
6463 cfg->inline_depth ++;
6464 prev_real_offset = cfg->real_offset;
6465 prev_cbb_hash = cfg->cbb_hash;
6466 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6467 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6468 prev_cil_start = cfg->cil_start;
6470 prev_cbb = cfg->cbb;
6471 prev_current_method = cfg->current_method;
6472 prev_generic_context = cfg->generic_context;
6473 prev_ret_var_set = cfg->ret_var_set;
6474 prev_disable_inline = cfg->disable_inline;
6476 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; returns the inline cost, or < 0 on failure */
6479 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6481 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state */
6483 cfg->inlined_method = prev_inlined_method;
6484 cfg->real_offset = prev_real_offset;
6485 cfg->cbb_hash = prev_cbb_hash;
6486 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6487 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6488 cfg->cil_start = prev_cil_start;
6490 cfg->locals = prev_locals;
6491 cfg->args = prev_args;
6492 cfg->arg_types = prev_arg_types;
6493 cfg->current_method = prev_current_method;
6494 cfg->generic_context = prev_generic_context;
6495 cfg->ret_var_set = prev_ret_var_set;
6496 cfg->disable_inline = prev_disable_inline;
6497 cfg->inline_depth --;
/* Accept the inline if it is cheap enough, mandatory, or marked AggressiveInlining */
6499 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6500 if (cfg->verbose_level > 2)
6501 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6503 cfg->stat_inlined_methods++;
6505 /* always add some code to avoid block split failures */
6506 MONO_INST_NEW (cfg, ins, OP_NOP);
6507 MONO_ADD_INS (prev_cbb, ins);
6509 prev_cbb->next_bb = sbblock;
6510 link_bblock (cfg, prev_cbb, sbblock);
6513 * Get rid of the begin and end bblocks if possible to aid local
6516 if (prev_cbb->out_count == 1)
6517 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6519 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6520 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6522 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6523 MonoBasicBlock *prev = ebblock->in_bb [0];
6525 if (prev->next_bb == ebblock) {
6526 mono_merge_basic_blocks (cfg, prev, ebblock);
6528 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6529 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6530 cfg->cbb = prev_cbb;
6533 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6538 * It's possible that the rvar is set in some prev bblock, but not in others.
6544 for (i = 0; i < ebblock->in_count; ++i) {
6545 bb = ebblock->in_bb [i];
6547 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6550 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6560 * If the inlined method contains only a throw, then the ret var is not
6561 * set, so set it to a dummy value.
6564 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6566 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6569 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: reset any exception raised during the attempt */
6572 if (cfg->verbose_level > 2)
6573 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6574 cfg->exception_type = MONO_EXCEPTION_NONE;
6576 /* This gets rid of the newly added bblocks */
6577 cfg->cbb = prev_cbb;
6579 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6584 * Some of these comments may well be out-of-date.
6585 * Design decisions: we do a single pass over the IL code (and we do bblock
6586 * splitting/merging in the few cases when it's required: a back jump to an IL
6587 * address that was not already seen as bblock starting point).
6588 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6589 * Complex operations are decomposed in simpler ones right away. We need to let the
6590 * arch-specific code peek and poke inside this process somehow (except when the
6591 * optimizations can take advantage of the full semantic info of coarse opcodes).
6592 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6593 * MonoInst->opcode initially is the IL opcode or some simplification of that
6594 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6595 * opcode with value bigger than OP_LAST.
6596 * At this point the IR can be handed over to an interpreter, a dumb code generator
6597 * or to the optimizing code generator that will translate it to SSA form.
6599 * Profiling directed optimizations.
6600 * We may compile by default with few or no optimizations and instrument the code
6601 * or the user may indicate what methods to optimize the most either in a config file
6602 * or through repeated runs where the compiler applies offline the optimizations to
6603 * each method and then decides if it was worth it.
6606 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6607 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6608 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6609 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6610 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6611 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6612 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6613 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6615 /* offset from br.s -> br like opcodes */
6616 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE unless a different basic block than BB starts at IL address
 * IP (i.e. IP is still inside BB, or no bblock starts there).
 */
6619 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6621 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6623 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END), decoding each opcode's operand kind and
 * creating basic blocks (via GET_BBLOCK) at branch targets, switch targets
 * and the fall-through addresses after branches. Also marks the bblock that
 * contains a CEE_THROW as out-of-line.
 */
6627 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6629 unsigned char *ip = start;
6630 unsigned char *target;
6633 MonoBasicBlock *bblock;
6634 const MonoOpcode *opcode;
6637 cli_addr = ip - start;
6638 i = mono_opcode_value ((const guint8 **)&ip, end);
6641 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding; branches create bblocks */
6642 switch (opcode->argument) {
6643 case MonoInlineNone:
6646 case MonoInlineString:
6647 case MonoInlineType:
6648 case MonoInlineField:
6649 case MonoInlineMethod:
6652 case MonoShortInlineR:
6659 case MonoShortInlineVar:
6660 case MonoShortInlineI:
6663 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
6664 target = start + cli_addr + 2 + (signed char)ip [1];
6665 GET_BBLOCK (cfg, bblock, target);
6668 GET_BBLOCK (cfg, bblock, ip);
6670 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
6671 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6672 GET_BBLOCK (cfg, bblock, target);
6675 GET_BBLOCK (cfg, bblock, ip);
6677 case MonoInlineSwitch: {
6678 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table */
6681 cli_addr += 5 + 4 * n;
6682 target = start + cli_addr;
6683 GET_BBLOCK (cfg, bblock, target);
6685 for (j = 0; j < n; ++j) {
6686 target = start + cli_addr + (gint32)read32 (ip);
6687 GET_BBLOCK (cfg, bblock, target);
6697 g_assert_not_reached ();
6700 if (i == CEE_THROW) {
6701 unsigned char *bb_start = ip - 1;
6703 /* Find the start of the bblock containing the throw */
6705 while ((bb_start >= start) && !bblock) {
6706 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6710 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve method TOKEN in the context of M. For wrapper methods the result
 * comes from the wrapper data (and is inflated with CONTEXT); otherwise it is
 * loaded from M's image. Unlike mini_get_method (), open constructed types
 * are allowed in the result. Errors are reported through ERROR.
 */
6720 static inline MonoMethod *
6721 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6727 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6728 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6730 method = mono_class_inflate_generic_method_checked (method, context, error);
6733 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Resolve method TOKEN like mini_get_method_allow_open (), but when not
 * compiling gshared code, reject methods whose class is an open constructed
 * type (recorded as a bad-image error on CFG). CFG may be NULL; in that case
 * a local MonoError is used and cleaned up on failure.
 */
6739 static inline MonoMethod *
6740 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6743 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6745 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6746 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6750 if (!method && !cfg)
6751 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve class TOKEN in the context of METHOD: from wrapper data (then
 * inflated with CONTEXT) for wrapper methods, otherwise via typespec lookup
 * in METHOD's image. The resolved class is initialized before returning.
 */
6756 static inline MonoClass*
6757 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6762 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6763 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6765 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6766 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6769 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6770 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6773 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD, inflating
 * with CONTEXT. Returns NULL and sets ERROR on failure. Wrapper signatures are
 * stored as wrapper data; otherwise the signature is parsed from metadata.
 */
6777 static inline MonoMethodSignature*
6778 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6780 MonoMethodSignature *fsig;
6783 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6784 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6786 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6787 return_val_if_nok (error, NULL);
6790 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager::ThrowException(Exception) method,
 * looking it up once and caching it in a function-static.
 * NOTE(review): the static cache is written without visible synchronization —
 * benign only if concurrent lookups produce the same value; confirm.
 */
6796 throw_exception (void)
6798 static MonoMethod *method = NULL;
6801 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6802 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-created exception object EX at runtime, by
 * calling the managed SecurityManager::ThrowException helper with EX as a
 * pointer constant argument.
 */
6809 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6811 MonoMethod *thrower = throw_exception ();
6814 EMIT_NEW_PCONST (cfg, args [0], ex);
6815 mono_emit_method_call (cfg, thrower, args, NULL);
6819 * Return the original method if a wrapper is specified. We can only access
6820 * the custom attributes from the original method.
6823 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method */
6825 if (method->wrapper_type == MONO_WRAPPER_NONE)
6828 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6829 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6832 /* in other cases we need to find the original method */
6833 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER is not allowed to access FIELD, emit IR
 * that throws the corresponding security exception at runtime. The check runs
 * against the original (unwrapped) caller, since wrappers carry no attributes.
 */
6837 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6839 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6840 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6842 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER is not allowed to call CALLEE, emit IR
 * that throws the corresponding security exception at runtime. As with field
 * access, the check uses the original (unwrapped) caller.
 */
6846 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6848 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6849 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6851 emit_throw_exception (cfg, ex);
6855 * Check that the IL instructions at ip are the array initialization
6856 * sequence and return the pointer to the data and the size.
/* Recognizes the C# compiler's static-array-initializer pattern:
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray
 * and, when it matches, returns a direct pointer to the field's RVA data so
 * the call can be replaced by an inline memory copy. Returns NULL when the
 * pattern or element type does not qualify. */
6859 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6862 * newarr[System.Int32]
6864 * ldtoken field valuetype ...
6865 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks that the ldtoken token is a Field token (table 0x04) */
6867 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6869 guint32 token = read32 (ip + 7);
6870 guint32 field_token = read32 (ip + 2);
6871 guint32 field_index = field_token & 0xffffff;
6873 const char *data_ptr;
6875 MonoMethod *cmethod;
6876 MonoClass *dummy_class;
6877 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6881 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6885 *out_field_token = field_token;
6887 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the exact corlib RuntimeHelpers.InitializeArray call qualifies */
6890 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6892 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6893 case MONO_TYPE_BOOLEAN:
6897 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6898 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6899 case MONO_TYPE_CHAR:
/* The array must not read past the end of the field's static data */
6916 if (size > mono_type_size (field->type, &dummy_align))
6919 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6920 if (!image_is_dynamic (method->klass->image)) {
6921 field_index = read32 (ip + 2) & 0xffffff;
6922 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6923 data_ptr = mono_image_rva_map (method->klass->image, rva);
6924 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6925 /* for aot code we do the lookup on load */
/* For AOT, return the RVA itself (as a fake pointer) so the real mapping happens at load time */
6926 if (aot && data_ptr)
6927 return (const char *)GUINT_TO_POINTER (rva);
6929 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6931 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD. Builds a human-readable message from the method name plus either
 * the header-parse error, an "empty body" note, or a disassembly of the
 * offending instruction. The header is kept alive on cfg->headers_to_free.
 */
6939 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6942 char *method_fname = mono_method_full_name (method, TRUE);
6944 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6947 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6948 mono_error_cleanup (&error);
6949 } else if (header->code_size == 0)
6950 method_code = g_strdup ("method body is empty.");
6952 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6953 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6954 g_free (method_fname);
6955 g_free (method_code);
6956 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the stack (*sp) into local variable N.
 * When the store would be a plain reg-reg move and the value-producing
 * instruction is the last one emitted (and is a simple constant load), the
 * move is elided by retargeting that instruction's dreg to the local's dreg.
 */
6960 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6963 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6964 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6965 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6966 /* Optimize reg-reg moves away */
6968 * Can't optimize other opcodes, since sp[0] might point to
6969 * the last ins of a decomposed opcode.
6971 sp [0]->dreg = (cfg)->locals [n]->dreg;
6973 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6978 * ldloca inhibits many optimizations so try to get rid of it in common
/* Pattern-match "ldloca <n>; initobj <type>" and replace it with a direct
 * initialization of the local, avoiding taking the local's address.
 * Returns the new IP past the consumed instructions, or presumably NULL/the
 * original IP when the pattern does not match — elided lines; confirm. */
6981 static inline unsigned char *
6982 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6992 local = read16 (ip + 2);
/* Only fold when the following INITOBJ is inside the same basic block */
6996 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6997 /* From the INITOBJ case */
6998 token = read32 (ip + 2);
6999 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7000 CHECK_TYPELOAD (klass);
7001 type = mini_get_underlying_type (&klass->byval_arg);
7002 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode, where
 * vtable slots hold function descriptors (addr + arg pairs) rather than code
 * addresses/trampolines. Dispatches to one of four strategies:
 *   1. plain virtual call        — load the descriptor from the vtable slot,
 *                                  lazily initializing empty slots via icall;
 *   2. simple interface call     — call through the IMT slot's thunk to obtain
 *                                  the target descriptor;
 *   3. generic-virtual / variant — like (2) but the thunk can return NULL for
 *                                  unseen instantiations, with a slowpath icall;
 *   4. gsharedvt fallback        — fully dynamic resolution via runtime icalls.
 * SP [0] is the receiver; a null check is emitted on it up front.
 */
7010 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7012 MonoInst *icall_args [16];
7013 MonoInst *call_target, *ins, *vtable_ins;
7014 int arg_reg, this_reg, vtable_reg;
7015 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7016 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7017 gboolean variant_iface = FALSE;
7020 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7023 * In llvm-only mode, vtables contain function descriptors instead of
7024 * method addresses/trampolines.
7026 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces dispatch through IMT slots, classes through vtable indexes */
7029 slot = mono_method_get_imt_slot (cmethod);
7031 slot = mono_method_get_vtable_index (cmethod);
7033 this_reg = sp [0]->dreg;
7035 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7036 variant_iface = TRUE;
7038 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7040 * The simplest case, a normal virtual call.
7042 int slot_reg = alloc_preg (cfg);
7043 int addr_reg = alloc_preg (cfg);
7044 int arg_reg = alloc_preg (cfg);
7045 MonoBasicBlock *non_null_bb;
7047 vtable_reg = alloc_preg (cfg);
7048 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7049 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7051 /* Load the vtable slot, which contains a function descriptor. */
7052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7054 NEW_BBLOCK (cfg, non_null_bb);
/* Slots start out NULL; branch around the lazy-init icall when already set */
7056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7057 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7058 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7061 // FIXME: Make the wrapper use the preserveall cconv
7062 // FIXME: Use one icall per slot for small slot numbers ?
7063 icall_args [0] = vtable_ins;
7064 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7065 /* Make the icall return the vtable slot value to save some code space */
7066 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7067 ins->dreg = slot_reg;
7068 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7071 MONO_START_BB (cfg, non_null_bb);
7072 /* Load the address + arg from the vtable slot */
7073 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7076 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7079 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7081 * A simple interface call
7083 * We make a call through an imt slot to obtain the function descriptor we need to call.
7084 * The imt slot contains a function descriptor for a runtime function + arg.
7086 int slot_reg = alloc_preg (cfg);
7087 int addr_reg = alloc_preg (cfg);
7088 int arg_reg = alloc_preg (cfg);
7089 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7091 vtable_reg = alloc_preg (cfg);
7092 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7093 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7096 * The slot is already initialized when the vtable is created so there is no need
7100 /* Load the imt slot, which contains a function descriptor. */
7101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7103 /* Load the address + arg of the imt thunk from the imt slot */
7104 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7105 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7107 * IMT thunks in llvm-only mode are C functions which take an info argument
7108 * plus the imt method and return the ftndesc to call.
7110 icall_args [0] = thunk_arg_ins;
7111 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7112 cmethod, MONO_RGCTX_INFO_METHOD);
7113 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7115 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7118 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7120 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7121 * dynamically extended as more instantiations are discovered.
7122 * This handles generic virtual methods both on classes and interfaces.
7124 int slot_reg = alloc_preg (cfg);
7125 int addr_reg = alloc_preg (cfg);
7126 int arg_reg = alloc_preg (cfg);
7127 int ftndesc_reg = alloc_preg (cfg);
7128 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7129 MonoBasicBlock *slowpath_bb, *end_bb;
7131 NEW_BBLOCK (cfg, slowpath_bb);
7132 NEW_BBLOCK (cfg, end_bb);
7134 vtable_reg = alloc_preg (cfg);
7135 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7137 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7139 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7141 /* Load the slot, which contains a function descriptor. */
7142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7144 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7145 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7150 /* Same as with iface calls */
7151 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7152 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7153 icall_args [0] = thunk_arg_ins;
7154 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7155 cmethod, MONO_RGCTX_INFO_METHOD);
7156 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7157 ftndesc_ins->dreg = ftndesc_reg;
7159 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7160 * they don't know about yet. Fall back to the slowpath in that case.
7162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7163 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7168 MONO_START_BB (cfg, slowpath_bb);
7169 icall_args [0] = vtable_ins;
7170 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7171 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7172 cmethod, MONO_RGCTX_INFO_METHOD);
7174 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7176 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7177 ftndesc_ins->dreg = ftndesc_reg;
7178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7181 MONO_START_BB (cfg, end_bb);
7182 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7186 * Non-optimized cases
7188 icall_args [0] = sp [0];
7189 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7191 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7192 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall writes the extra argument through this out-address */
7194 arg_reg = alloc_preg (cfg);
7195 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7196 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7198 g_assert (is_gsharedvt);
7200 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7202 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7205 * Pass the extra argument even if the callee doesn't receive it, most
7206 * calling conventions allow this.
7208 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain.
 */
7212 is_exception_class (MonoClass *klass)
7215 if (klass == mono_defaults.exception_class)
7217 klass = klass->parent;
7223 * is_jit_optimizer_disabled:
7225 * Determine whenever M's assembly has a DebuggableAttribute with the
7226 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly; the memory barriers order the value
 * store before the inited-flag store so concurrent readers see a
 * fully-written cache entry. */
7229 is_jit_optimizer_disabled (MonoMethod *m)
7232 MonoAssembly *ass = m->klass->image->assembly;
7233 MonoCustomAttrInfo* attrs;
7236 gboolean val = FALSE;
7239 if (ass->jit_optimizer_disabled_inited)
7240 return ass->jit_optimizer_disabled;
7242 klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute type available: cache FALSE and bail out */
7246 ass->jit_optimizer_disabled = FALSE;
7247 mono_memory_barrier ();
7248 ass->jit_optimizer_disabled_inited = TRUE;
7252 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7253 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7255 for (i = 0; i < attrs->num_attrs; ++i) {
7256 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7258 MonoMethodSignature *sig;
7260 if (!attr->ctor || attr->ctor->klass != klass)
7262 /* Decode the attribute. See reflection.c */
7263 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog */
7264 g_assert (read16 (p) == 0x0001);
7267 // FIXME: Support named parameters
7268 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is handled */
7269 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7271 /* Two boolean arguments */
7275 mono_custom_attrs_free (attrs);
7278 ass->jit_optimizer_disabled = val;
7279 mono_memory_barrier ();
7280 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call to CMETHOD (made with CALL_OPCODE from METHOD)
 * can be compiled as a real tail call. Starts from the architecture's
 * verdict and then vetoes cases where the callee could observe the current
 * method's stack (byref/pointer args, valuetype `this`), pinvokes, LMF-saving
 * methods, most wrappers, and any opcode other than plain CEE_CALL.
 */
7286 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7288 gboolean supported_tail_call;
7291 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7293 for (i = 0; i < fsig->param_count; ++i) {
7294 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7295 /* These can point to the current method's stack */
7296 supported_tail_call = FALSE;
7298 if (fsig->hasthis && cmethod->klass->valuetype)
7299 /* this might point to the current method's stack */
7300 supported_tail_call = FALSE;
7301 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7302 supported_tail_call = FALSE;
7303 if (cfg->method->save_lmf)
7304 supported_tail_call = FALSE;
7305 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7306 supported_tail_call = FALSE;
7307 if (call_opcode != CEE_CALL)
7308 supported_tail_call = FALSE;
7310 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs */
7312 if (supported_tail_call) {
7313 if (!mono_debug_count ())
7314 supported_tail_call = FALSE;
7318 return supported_tail_call;
7324 * Handle calls made to ctors from NEWOBJ opcodes.
/* SP holds the newly allocated object followed by the ctor arguments.
 * Picks between: an intrinsic ctor, inlining the ctor body, a gsharedvt
 * indirect call, an rgctx-based indirect call for unsharable generic
 * instances, and a plain direct call. */
7327 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7328 MonoInst **sp, guint8 *ip, int *inline_costs)
7330 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument */
7332 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7333 mono_method_is_generic_sharable (cmethod, TRUE)) {
7334 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7335 mono_class_vtable (cfg->domain, cmethod->klass);
7336 CHECK_TYPELOAD (cmethod->klass);
7338 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7339 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7342 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7343 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7345 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7347 CHECK_TYPELOAD (cmethod->klass);
7348 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7353 /* Avoid virtual calls to ctors if possible */
7354 if (mono_class_is_marshalbyref (cmethod->klass))
7355 callvirt_this_arg = sp [0];
7357 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7358 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7359 CHECK_CFG_EXCEPTION;
/* Exception-class ctors are excluded from inlining (keeps throw paths small) */
7360 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7361 mono_method_check_inlining (cfg, cmethod) &&
7362 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7365 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7366 cfg->real_offset += 5;
/* The call itself would have cost ~5, so only charge the difference */
7368 *inline_costs += costs - 5;
7370 INLINE_FAILURE ("inline failure");
7371 // FIXME-VT: Clean this up
7372 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7373 GSHAREDVT_FAILURE(*ip);
7374 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7376 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7379 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7381 if (cfg->llvm_only) {
7382 // FIXME: Avoid initializing vtable_arg
7383 emit_llvmonly_calli (cfg, fsig, sp, addr);
7385 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7387 } else if (context_used &&
7388 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7389 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7390 MonoInst *cmethod_addr;
7392 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7394 if (cfg->llvm_only) {
7395 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7396 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7397 emit_llvmonly_calli (cfg, fsig, sp, addr);
7399 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7400 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7402 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7405 INLINE_FAILURE ("ctor call");
7406 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7407 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR that sets VAL as the return value of the method being compiled.
 * Valuetype returns (STOBJ) are either stored into the return variable or,
 * when the caller passed a vret address, written through that address.
 * On soft-float targets an R4 return is first converted via an icall.
 */
7414 emit_setret (MonoCompile *cfg, MonoInst *val)
7416 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7419 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7422 if (!cfg->vret_addr) {
7423 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7425 EMIT_NEW_RETLOADA (cfg, ret_addr);
7427 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7428 ins->klass = mono_class_from_mono_type (ret_type);
7431 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7432 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7433 MonoInst *iargs [1];
7437 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7438 mono_arch_emit_setret (cfg, cfg->method, conv);
7440 mono_arch_emit_setret (cfg, cfg->method, val);
7443 mono_arch_emit_setret (cfg, cfg->method, val);
7449 * mono_method_to_ir:
7451 * Translate the .net IL into linear IR.
7453 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7454 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7455 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7456 * @inline_args: if not NULL, contains the arguments to the inline call
7457 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7458 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7460 * This method is used to turn ECMA IL into Mono's internal Linear IR
7461 representation. It is used both for entire methods, as well as
7462 * inlining existing methods. In the former case, the @start_bblock,
7463 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7464 * inline_offset is set to zero.
7466 * Returns: the inline cost, or -1 if there was an error processing this method.
7469 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7470 MonoInst *return_var, MonoInst **inline_args,
7471 guint inline_offset, gboolean is_virtual_call)
7474 MonoInst *ins, **sp, **stack_start;
7475 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7476 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7477 MonoMethod *cmethod, *method_definition;
7478 MonoInst **arg_array;
7479 MonoMethodHeader *header;
7481 guint32 token, ins_flag;
7483 MonoClass *constrained_class = NULL;
7484 unsigned char *ip, *end, *target, *err_pos;
7485 MonoMethodSignature *sig;
7486 MonoGenericContext *generic_context = NULL;
7487 MonoGenericContainer *generic_container = NULL;
7488 MonoType **param_types;
7489 int i, n, start_new_bblock, dreg;
7490 int num_calls = 0, inline_costs = 0;
7491 int breakpoint_id = 0;
7493 GSList *class_inits = NULL;
7494 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7496 gboolean init_locals, seq_points, skip_dead_blocks;
7497 gboolean sym_seq_points = FALSE;
7498 MonoDebugMethodInfo *minfo;
7499 MonoBitSet *seq_point_locs = NULL;
7500 MonoBitSet *seq_point_set_locs = NULL;
7502 cfg->disable_inline = is_jit_optimizer_disabled (method);
7504 /* serialization and xdomain stuff may need access to private fields and methods */
7505 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7506 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7507 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7508 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7509 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7510 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7512 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7513 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7514 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7515 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7516 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7518 image = method->klass->image;
7519 header = mono_method_get_header_checked (method, &cfg->error);
7521 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7522 goto exception_exit;
7524 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7527 generic_container = mono_method_get_generic_container (method);
7528 sig = mono_method_signature (method);
7529 num_args = sig->hasthis + sig->param_count;
7530 ip = (unsigned char*)header->code;
7531 cfg->cil_start = ip;
7532 end = ip + header->code_size;
7533 cfg->stat_cil_code_size += header->code_size;
7535 seq_points = cfg->gen_seq_points && cfg->method == method;
7537 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7538 /* We could hit a seq point before attaching to the JIT (#8338) */
7542 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7543 minfo = mono_debug_lookup_method (method);
7545 MonoSymSeqPoint *sps;
7546 int i, n_il_offsets;
7548 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7549 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7550 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7551 sym_seq_points = TRUE;
7552 for (i = 0; i < n_il_offsets; ++i) {
7553 if (sps [i].il_offset < header->code_size)
7554 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7558 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7560 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7562 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7563 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7565 mono_debug_free_method_async_debug_info (asyncMethod);
7567 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7568 /* Methods without line number info like auto-generated property accessors */
7569 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7570 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7571 sym_seq_points = TRUE;
7576 * Methods without init_locals set could cause asserts in various passes
7577 * (#497220). To work around this, we emit dummy initialization opcodes
7578 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7579 * on some platforms.
7581 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7582 init_locals = header->init_locals;
7586 method_definition = method;
7587 while (method_definition->is_inflated) {
7588 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7589 method_definition = imethod->declaring;
7592 /* SkipVerification is not allowed if core-clr is enabled */
7593 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7595 dont_verify_stloc = TRUE;
7598 if (sig->is_inflated)
7599 generic_context = mono_method_get_context (method);
7600 else if (generic_container)
7601 generic_context = &generic_container->context;
7602 cfg->generic_context = generic_context;
7605 g_assert (!sig->has_type_parameters);
7607 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7608 g_assert (method->is_inflated);
7609 g_assert (mono_method_get_context (method)->method_inst);
7611 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7612 g_assert (sig->generic_param_count);
7614 if (cfg->method == method) {
7615 cfg->real_offset = 0;
7617 cfg->real_offset = inline_offset;
7620 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7621 cfg->cil_offset_to_bb_len = header->code_size;
7623 cfg->current_method = method;
7625 if (cfg->verbose_level > 2)
7626 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7628 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7630 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7631 for (n = 0; n < sig->param_count; ++n)
7632 param_types [n + sig->hasthis] = sig->params [n];
7633 cfg->arg_types = param_types;
7635 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7636 if (cfg->method == method) {
7638 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7639 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7642 NEW_BBLOCK (cfg, start_bblock);
7643 cfg->bb_entry = start_bblock;
7644 start_bblock->cil_code = NULL;
7645 start_bblock->cil_length = 0;
7648 NEW_BBLOCK (cfg, end_bblock);
7649 cfg->bb_exit = end_bblock;
7650 end_bblock->cil_code = NULL;
7651 end_bblock->cil_length = 0;
7652 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7653 g_assert (cfg->num_bblocks == 2);
7655 arg_array = cfg->args;
7657 if (header->num_clauses) {
7658 cfg->spvars = g_hash_table_new (NULL, NULL);
7659 cfg->exvars = g_hash_table_new (NULL, NULL);
7661 /* handle exception clauses */
7662 for (i = 0; i < header->num_clauses; ++i) {
7663 MonoBasicBlock *try_bb;
7664 MonoExceptionClause *clause = &header->clauses [i];
7665 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7667 try_bb->real_offset = clause->try_offset;
7668 try_bb->try_start = TRUE;
7669 try_bb->region = ((i + 1) << 8) | clause->flags;
7670 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7671 tblock->real_offset = clause->handler_offset;
7672 tblock->flags |= BB_EXCEPTION_HANDLER;
7675 * Linking the try block with the EH block hinders inlining as we won't be able to
7676 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7678 if (COMPILE_LLVM (cfg))
7679 link_bblock (cfg, try_bb, tblock);
7681 if (*(ip + clause->handler_offset) == CEE_POP)
7682 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7684 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7685 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7686 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7687 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7688 MONO_ADD_INS (tblock, ins);
7690 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7691 /* finally clauses already have a seq point */
7692 /* seq points for filter clauses are emitted below */
7693 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7694 MONO_ADD_INS (tblock, ins);
7697 /* todo: is a fault block unsafe to optimize? */
7698 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7699 tblock->flags |= BB_EXCEPTION_UNSAFE;
7702 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7704 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7706 /* catch and filter blocks get the exception object on the stack */
7707 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7708 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7710 /* mostly like handle_stack_args (), but just sets the input args */
7711 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7712 tblock->in_scount = 1;
7713 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7714 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7718 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7719 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7720 if (!cfg->compile_llvm) {
7721 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7722 ins->dreg = tblock->in_stack [0]->dreg;
7723 MONO_ADD_INS (tblock, ins);
7726 MonoInst *dummy_use;
7729 * Add a dummy use for the exvar so its liveness info will be
7732 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7735 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7736 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7737 MONO_ADD_INS (tblock, ins);
7740 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7741 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7742 tblock->flags |= BB_EXCEPTION_HANDLER;
7743 tblock->real_offset = clause->data.filter_offset;
7744 tblock->in_scount = 1;
7745 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7746 /* The filter block shares the exvar with the handler block */
7747 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7748 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7749 MONO_ADD_INS (tblock, ins);
7753 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7754 clause->data.catch_class &&
7756 mono_class_check_context_used (clause->data.catch_class)) {
7758 * In shared generic code with catch
7759 * clauses containing type variables
7760 * the exception handling code has to
7761 * be able to get to the rgctx.
7762 * Therefore we have to make sure that
7763 * the vtable/mrgctx argument (for
7764 * static or generic methods) or the
7765 * "this" argument (for non-static
7766 * methods) are live.
7768 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7769 mini_method_get_context (method)->method_inst ||
7770 method->klass->valuetype) {
7771 mono_get_vtable_var (cfg);
7773 MonoInst *dummy_use;
7775 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7780 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7781 cfg->cbb = start_bblock;
7782 cfg->args = arg_array;
7783 mono_save_args (cfg, sig, inline_args);
7786 /* FIRST CODE BLOCK */
7787 NEW_BBLOCK (cfg, tblock);
7788 tblock->cil_code = ip;
7792 ADD_BBLOCK (cfg, tblock);
7794 if (cfg->method == method) {
7795 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7796 if (breakpoint_id) {
7797 MONO_INST_NEW (cfg, ins, OP_BREAK);
7798 MONO_ADD_INS (cfg->cbb, ins);
7802 /* we use a separate basic block for the initialization code */
7803 NEW_BBLOCK (cfg, init_localsbb);
7804 if (cfg->method == method)
7805 cfg->bb_init = init_localsbb;
7806 init_localsbb->real_offset = cfg->real_offset;
7807 start_bblock->next_bb = init_localsbb;
7808 init_localsbb->next_bb = cfg->cbb;
7809 link_bblock (cfg, start_bblock, init_localsbb);
7810 link_bblock (cfg, init_localsbb, cfg->cbb);
7812 cfg->cbb = init_localsbb;
7814 if (cfg->gsharedvt && cfg->method == method) {
7815 MonoGSharedVtMethodInfo *info;
7816 MonoInst *var, *locals_var;
7819 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7820 info->method = cfg->method;
7821 info->count_entries = 16;
7822 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7823 cfg->gsharedvt_info = info;
7825 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7826 /* prevent it from being register allocated */
7827 //var->flags |= MONO_INST_VOLATILE;
7828 cfg->gsharedvt_info_var = var;
7830 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7831 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7833 /* Allocate locals */
7834 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7835 /* prevent it from being register allocated */
7836 //locals_var->flags |= MONO_INST_VOLATILE;
7837 cfg->gsharedvt_locals_var = locals_var;
7839 dreg = alloc_ireg (cfg);
7840 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7842 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7843 ins->dreg = locals_var->dreg;
7845 MONO_ADD_INS (cfg->cbb, ins);
7846 cfg->gsharedvt_locals_var_ins = ins;
7848 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7851 ins->flags |= MONO_INST_INIT;
7855 if (mono_security_core_clr_enabled ()) {
7856 /* check if this is native code, e.g. an icall or a p/invoke */
7857 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7858 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7860 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7861 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7863 /* if this is a native call then it can only be JITted from platform code */
7864 if ((icall || pinvk) && method->klass && method->klass->image) {
7865 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7866 MonoException *ex = icall ? mono_get_exception_security () :
7867 mono_get_exception_method_access ();
7868 emit_throw_exception (cfg, ex);
7875 CHECK_CFG_EXCEPTION;
7877 if (header->code_size == 0)
7880 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7885 if (cfg->method == method)
7886 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7888 for (n = 0; n < header->num_locals; ++n) {
7889 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7894 /* We force the vtable variable here for all shared methods
7895 for the possibility that they might show up in a stack
7896 trace where their exact instantiation is needed. */
7897 if (cfg->gshared && method == cfg->method) {
7898 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7899 mini_method_get_context (method)->method_inst ||
7900 method->klass->valuetype) {
7901 mono_get_vtable_var (cfg);
7903 /* FIXME: Is there a better way to do this?
7904 We need the variable live for the duration
7905 of the whole method. */
7906 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7910 /* add a check for this != NULL to inlined methods */
7911 if (is_virtual_call) {
7914 NEW_ARGLOAD (cfg, arg_ins, 0);
7915 MONO_ADD_INS (cfg->cbb, arg_ins);
7916 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7919 skip_dead_blocks = !dont_verify;
7920 if (skip_dead_blocks) {
7921 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7926 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7927 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7930 start_new_bblock = 0;
7932 if (cfg->method == method)
7933 cfg->real_offset = ip - header->code;
7935 cfg->real_offset = inline_offset;
7940 if (start_new_bblock) {
7941 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7942 if (start_new_bblock == 2) {
7943 g_assert (ip == tblock->cil_code);
7945 GET_BBLOCK (cfg, tblock, ip);
7947 cfg->cbb->next_bb = tblock;
7949 start_new_bblock = 0;
7950 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7951 if (cfg->verbose_level > 3)
7952 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7953 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7957 g_slist_free (class_inits);
7960 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7961 link_bblock (cfg, cfg->cbb, tblock);
7962 if (sp != stack_start) {
7963 handle_stack_args (cfg, stack_start, sp - stack_start);
7965 CHECK_UNVERIFIABLE (cfg);
7967 cfg->cbb->next_bb = tblock;
7969 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7970 if (cfg->verbose_level > 3)
7971 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7972 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7975 g_slist_free (class_inits);
7980 if (skip_dead_blocks) {
7981 int ip_offset = ip - header->code;
7983 if (ip_offset == bb->end)
7987 int op_size = mono_opcode_size (ip, end);
7988 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7990 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7992 if (ip_offset + op_size == bb->end) {
7993 MONO_INST_NEW (cfg, ins, OP_NOP);
7994 MONO_ADD_INS (cfg->cbb, ins);
7995 start_new_bblock = 1;
8003 * Sequence points are points where the debugger can place a breakpoint.
8004 * Currently, we generate these automatically at points where the IL
8007 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8009 * Make methods interruptable at the beginning, and at the targets of
8010 * backward branches.
8011 * Also, do this at the start of every bblock in methods with clauses too,
8012 * to be able to handle instructions with imprecise control flow like
8014 * Backward branches are handled at the end of method-to-ir ().
8016 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8017 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8019 /* Avoid sequence points on empty IL like .volatile */
8020 // FIXME: Enable this
8021 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8022 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8023 if ((sp != stack_start) && !sym_seq_point)
8024 ins->flags |= MONO_INST_NONEMPTY_STACK;
8025 MONO_ADD_INS (cfg->cbb, ins);
8028 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8031 cfg->cbb->real_offset = cfg->real_offset;
8033 if ((cfg->method == method) && cfg->coverage_info) {
8034 guint32 cil_offset = ip - header->code;
8035 cfg->coverage_info->data [cil_offset].cil_code = ip;
8037 /* TODO: Use an increment here */
8038 #if defined(TARGET_X86)
8039 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8040 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8042 MONO_ADD_INS (cfg->cbb, ins);
8044 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8045 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8049 if (cfg->verbose_level > 3)
8050 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8054 if (seq_points && !sym_seq_points && sp != stack_start) {
8056 * The C# compiler uses these nops to notify the JIT that it should
8057 * insert seq points.
8059 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8060 MONO_ADD_INS (cfg->cbb, ins);
8062 if (cfg->keep_cil_nops)
8063 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8065 MONO_INST_NEW (cfg, ins, OP_NOP);
8067 MONO_ADD_INS (cfg->cbb, ins);
8070 if (should_insert_brekpoint (cfg->method)) {
8071 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8073 MONO_INST_NEW (cfg, ins, OP_NOP);
8076 MONO_ADD_INS (cfg->cbb, ins);
8082 CHECK_STACK_OVF (1);
8083 n = (*ip)-CEE_LDARG_0;
8085 EMIT_NEW_ARGLOAD (cfg, ins, n);
8093 CHECK_STACK_OVF (1);
8094 n = (*ip)-CEE_LDLOC_0;
8096 EMIT_NEW_LOCLOAD (cfg, ins, n);
8105 n = (*ip)-CEE_STLOC_0;
8108 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8110 emit_stloc_ir (cfg, sp, header, n);
8117 CHECK_STACK_OVF (1);
8120 EMIT_NEW_ARGLOAD (cfg, ins, n);
8126 CHECK_STACK_OVF (1);
8129 NEW_ARGLOADA (cfg, ins, n);
8130 MONO_ADD_INS (cfg->cbb, ins);
8140 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8142 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8147 CHECK_STACK_OVF (1);
8150 EMIT_NEW_LOCLOAD (cfg, ins, n);
8154 case CEE_LDLOCA_S: {
8155 unsigned char *tmp_ip;
8157 CHECK_STACK_OVF (1);
8158 CHECK_LOCAL (ip [1]);
8160 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8166 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8175 CHECK_LOCAL (ip [1]);
8176 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8178 emit_stloc_ir (cfg, sp, header, ip [1]);
8183 CHECK_STACK_OVF (1);
8184 EMIT_NEW_PCONST (cfg, ins, NULL);
8185 ins->type = STACK_OBJ;
8190 CHECK_STACK_OVF (1);
8191 EMIT_NEW_ICONST (cfg, ins, -1);
8204 CHECK_STACK_OVF (1);
8205 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8211 CHECK_STACK_OVF (1);
8213 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8219 CHECK_STACK_OVF (1);
8220 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8226 CHECK_STACK_OVF (1);
8227 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8228 ins->type = STACK_I8;
8229 ins->dreg = alloc_dreg (cfg, STACK_I8);
8231 ins->inst_l = (gint64)read64 (ip);
8232 MONO_ADD_INS (cfg->cbb, ins);
8238 gboolean use_aotconst = FALSE;
8240 #ifdef TARGET_POWERPC
8241 /* FIXME: Clean this up */
8242 if (cfg->compile_aot)
8243 use_aotconst = TRUE;
8246 /* FIXME: we should really allocate this only late in the compilation process */
8247 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8249 CHECK_STACK_OVF (1);
8255 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8257 dreg = alloc_freg (cfg);
8258 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8259 ins->type = cfg->r4_stack_type;
8261 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8262 ins->type = cfg->r4_stack_type;
8263 ins->dreg = alloc_dreg (cfg, STACK_R8);
8265 MONO_ADD_INS (cfg->cbb, ins);
8275 gboolean use_aotconst = FALSE;
8277 #ifdef TARGET_POWERPC
8278 /* FIXME: Clean this up */
8279 if (cfg->compile_aot)
8280 use_aotconst = TRUE;
8283 /* FIXME: we should really allocate this only late in the compilation process */
8284 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8286 CHECK_STACK_OVF (1);
8292 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8294 dreg = alloc_freg (cfg);
8295 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8296 ins->type = STACK_R8;
8298 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8299 ins->type = STACK_R8;
8300 ins->dreg = alloc_dreg (cfg, STACK_R8);
8302 MONO_ADD_INS (cfg->cbb, ins);
8311 MonoInst *temp, *store;
8313 CHECK_STACK_OVF (1);
8317 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8318 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8320 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8323 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8336 if (sp [0]->type == STACK_R8)
8337 /* we need to pop the value from the x86 FP stack */
8338 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8343 MonoMethodSignature *fsig;
8346 INLINE_FAILURE ("jmp");
8347 GSHAREDVT_FAILURE (*ip);
8350 if (stack_start != sp)
8352 token = read32 (ip + 1);
8353 /* FIXME: check the signature matches */
8354 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8357 if (cfg->gshared && mono_method_check_context_used (cmethod))
8358 GENERIC_SHARING_FAILURE (CEE_JMP);
8360 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8362 fsig = mono_method_signature (cmethod);
8363 n = fsig->param_count + fsig->hasthis;
8364 if (cfg->llvm_only) {
8367 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8368 for (i = 0; i < n; ++i)
8369 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8370 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8372 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8373 * have to emit a normal return since llvm expects it.
8376 emit_setret (cfg, ins);
8377 MONO_INST_NEW (cfg, ins, OP_BR);
8378 ins->inst_target_bb = end_bblock;
8379 MONO_ADD_INS (cfg->cbb, ins);
8380 link_bblock (cfg, cfg->cbb, end_bblock);
8383 } else if (cfg->backend->have_op_tail_call) {
8384 /* Handle tail calls similarly to calls */
8387 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8388 call->method = cmethod;
8389 call->tail_call = TRUE;
8390 call->signature = mono_method_signature (cmethod);
8391 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8392 call->inst.inst_p0 = cmethod;
8393 for (i = 0; i < n; ++i)
8394 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8396 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8397 call->vret_var = cfg->vret_addr;
8399 mono_arch_emit_call (cfg, call);
8400 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8401 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8403 for (i = 0; i < num_args; ++i)
8404 /* Prevent arguments from being optimized away */
8405 arg_array [i]->flags |= MONO_INST_VOLATILE;
8407 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8408 ins = (MonoInst*)call;
8409 ins->inst_p0 = cmethod;
8410 MONO_ADD_INS (cfg->cbb, ins);
8414 start_new_bblock = 1;
8419 MonoMethodSignature *fsig;
8422 token = read32 (ip + 1);
8426 //GSHAREDVT_FAILURE (*ip);
8431 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8434 if (method->dynamic && fsig->pinvoke) {
8438 * This is a call through a function pointer using a pinvoke
8439 * signature. Have to create a wrapper and call that instead.
8440 * FIXME: This is very slow, need to create a wrapper at JIT time
8441 * instead based on the signature.
8443 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8444 EMIT_NEW_PCONST (cfg, args [1], fsig);
8446 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8449 n = fsig->param_count + fsig->hasthis;
8453 //g_assert (!virtual_ || fsig->hasthis);
8457 inline_costs += 10 * num_calls++;
8460 * Making generic calls out of gsharedvt methods.
8461 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8462 * patching gshared method addresses into a gsharedvt method.
8464 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8466 * We pass the address to the gsharedvt trampoline in the rgctx reg
8468 MonoInst *callee = addr;
8470 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8472 GSHAREDVT_FAILURE (*ip);
8476 GSHAREDVT_FAILURE (*ip);
8478 addr = emit_get_rgctx_sig (cfg, context_used,
8479 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8480 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8484 /* Prevent inlining of methods with indirect calls */
8485 INLINE_FAILURE ("indirect call");
8487 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8488 MonoJumpInfoType info_type;
8492 * Instead of emitting an indirect call, emit a direct call
8493 * with the contents of the aotconst as the patch info.
8495 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8496 info_type = (MonoJumpInfoType)addr->inst_c1;
8497 info_data = addr->inst_p0;
8499 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8500 info_data = addr->inst_right->inst_left;
8503 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8504 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8507 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8508 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8513 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8517 /* End of call, INS should contain the result of the call, if any */
8519 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8521 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8524 CHECK_CFG_EXCEPTION;
8528 constrained_class = NULL;
8532 case CEE_CALLVIRT: {
8533 MonoInst *addr = NULL;
8534 MonoMethodSignature *fsig = NULL;
8536 int virtual_ = *ip == CEE_CALLVIRT;
8537 gboolean pass_imt_from_rgctx = FALSE;
8538 MonoInst *imt_arg = NULL;
8539 MonoInst *keep_this_alive = NULL;
8540 gboolean pass_vtable = FALSE;
8541 gboolean pass_mrgctx = FALSE;
8542 MonoInst *vtable_arg = NULL;
8543 gboolean check_this = FALSE;
8544 gboolean supported_tail_call = FALSE;
8545 gboolean tail_call = FALSE;
8546 gboolean need_seq_point = FALSE;
8547 guint32 call_opcode = *ip;
8548 gboolean emit_widen = TRUE;
8549 gboolean push_res = TRUE;
8550 gboolean skip_ret = FALSE;
8551 gboolean delegate_invoke = FALSE;
8552 gboolean direct_icall = FALSE;
8553 gboolean constrained_partial_call = FALSE;
8554 MonoMethod *cil_method;
8557 token = read32 (ip + 1);
8561 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8564 cil_method = cmethod;
8566 if (constrained_class) {
8567 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8568 if (!mini_is_gsharedvt_klass (constrained_class)) {
8569 g_assert (!cmethod->klass->valuetype);
8570 if (!mini_type_is_reference (&constrained_class->byval_arg))
8571 constrained_partial_call = TRUE;
8575 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8576 if (cfg->verbose_level > 2)
8577 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8578 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8579 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8581 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8585 if (cfg->verbose_level > 2)
8586 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8588 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8590 * This is needed since get_method_constrained can't find
8591 * the method in klass representing a type var.
8592 * The type var is guaranteed to be a reference type in this
8595 if (!mini_is_gsharedvt_klass (constrained_class))
8596 g_assert (!cmethod->klass->valuetype);
8598 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8603 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8604 /* Use the corresponding method from the base type to avoid boxing */
8605 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8606 g_assert (base_type);
8607 constrained_class = mono_class_from_mono_type (base_type);
8608 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8613 if (!dont_verify && !cfg->skip_visibility) {
8614 MonoMethod *target_method = cil_method;
8615 if (method->is_inflated) {
8616 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8619 if (!mono_method_can_access_method (method_definition, target_method) &&
8620 !mono_method_can_access_method (method, cil_method))
8621 emit_method_access_failure (cfg, method, cil_method);
8624 if (mono_security_core_clr_enabled ())
8625 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8627 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8628 /* MS.NET seems to silently convert this to a callvirt */
8633 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8634 * converts to a callvirt.
8636 * tests/bug-515884.il is an example of this behavior
8638 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8639 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8640 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8644 if (!cmethod->klass->inited)
8645 if (!mono_class_init (cmethod->klass))
8646 TYPE_LOAD_ERROR (cmethod->klass);
8648 fsig = mono_method_signature (cmethod);
8651 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8652 mini_class_is_system_array (cmethod->klass)) {
8653 array_rank = cmethod->klass->rank;
8654 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8655 direct_icall = TRUE;
8656 } else if (fsig->pinvoke) {
8657 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8658 fsig = mono_method_signature (wrapper);
8659 } else if (constrained_class) {
8661 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8665 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8666 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8668 /* See code below */
8669 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8670 MonoBasicBlock *tbb;
8672 GET_BBLOCK (cfg, tbb, ip + 5);
8673 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8675 * We want to extend the try block to cover the call, but we can't do it if the
8676 * call is made directly since it's followed by an exception check.
8678 direct_icall = FALSE;
8682 mono_save_token_info (cfg, image, token, cil_method);
8684 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8685 need_seq_point = TRUE;
8687 /* Don't support calls made using type arguments for now */
8689 if (cfg->gsharedvt) {
8690 if (mini_is_gsharedvt_signature (fsig))
8691 GSHAREDVT_FAILURE (*ip);
8695 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8696 g_assert_not_reached ();
8698 n = fsig->param_count + fsig->hasthis;
8700 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8704 g_assert (!mono_method_check_context_used (cmethod));
8708 //g_assert (!virtual_ || fsig->hasthis);
8713 * We have the `constrained.' prefix opcode.
8715 if (constrained_class) {
8716 if (mini_is_gsharedvt_klass (constrained_class)) {
8717 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8718 /* The 'Own method' case below */
8719 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8720 /* 'The type parameter is instantiated as a reference type' case below. */
8722 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8723 CHECK_CFG_EXCEPTION;
8729 if (constrained_partial_call) {
8730 gboolean need_box = TRUE;
8733 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8734 * called method is not known at compile time either. The called method could end up being
8735 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8736 * to box the receiver.
8737 * A simple solution would be to box always and make a normal virtual call, but that would
8738 * be bad performance wise.
8740 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8742 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8747 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8748 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8749 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8750 ins->klass = constrained_class;
8751 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8752 CHECK_CFG_EXCEPTION;
8753 } else if (need_box) {
8755 MonoBasicBlock *is_ref_bb, *end_bb;
8756 MonoInst *nonbox_call;
8759 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8761 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8762 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8764 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8766 NEW_BBLOCK (cfg, is_ref_bb);
8767 NEW_BBLOCK (cfg, end_bb);
8769 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8774 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8776 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8779 MONO_START_BB (cfg, is_ref_bb);
8780 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8781 ins->klass = constrained_class;
8782 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8783 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8785 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8787 MONO_START_BB (cfg, end_bb);
8790 nonbox_call->dreg = ins->dreg;
8793 g_assert (mono_class_is_interface (cmethod->klass));
8794 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8795 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8798 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8800 * The type parameter is instantiated as a valuetype,
8801 * but that type doesn't override the method we're
8802 * calling, so we need to box `this'.
8804 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8805 ins->klass = constrained_class;
8806 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8807 CHECK_CFG_EXCEPTION;
8808 } else if (!constrained_class->valuetype) {
8809 int dreg = alloc_ireg_ref (cfg);
8812 * The type parameter is instantiated as a reference
8813 * type. We have a managed pointer on the stack, so
8814 * we need to dereference it here.
8816 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8817 ins->type = STACK_OBJ;
8820 if (cmethod->klass->valuetype) {
8823 /* Interface method */
8826 mono_class_setup_vtable (constrained_class);
8827 CHECK_TYPELOAD (constrained_class);
8828 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8830 TYPE_LOAD_ERROR (constrained_class);
8831 slot = mono_method_get_vtable_slot (cmethod);
8833 TYPE_LOAD_ERROR (cmethod->klass);
8834 cmethod = constrained_class->vtable [ioffset + slot];
8836 if (cmethod->klass == mono_defaults.enum_class) {
8837 /* Enum implements some interfaces, so treat this as the first case */
8838 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8839 ins->klass = constrained_class;
8840 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8841 CHECK_CFG_EXCEPTION;
8846 constrained_class = NULL;
8849 if (check_call_signature (cfg, fsig, sp))
8852 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8853 delegate_invoke = TRUE;
8855 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8856 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8857 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8865 * If the callee is a shared method, then its static cctor
8866 * might not get called after the call was patched.
8868 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8869 emit_class_init (cfg, cmethod->klass);
8870 CHECK_TYPELOAD (cmethod->klass);
8873 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8876 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8878 context_used = mini_method_check_context_used (cfg, cmethod);
8880 if (context_used && mono_class_is_interface (cmethod->klass)) {
8881 /* Generic method interface
8882 calls are resolved via a
8883 helper function and don't
8885 if (!cmethod_context || !cmethod_context->method_inst)
8886 pass_imt_from_rgctx = TRUE;
8890 * If a shared method calls another
8891 * shared method then the caller must
8892 * have a generic sharing context
8893 * because the magic trampoline
8894 * requires it. FIXME: We shouldn't
8895 * have to force the vtable/mrgctx
8896 * variable here. Instead there
8897 * should be a flag in the cfg to
8898 * request a generic sharing context.
8901 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8902 mono_get_vtable_var (cfg);
8907 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8909 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8911 CHECK_TYPELOAD (cmethod->klass);
8912 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8917 g_assert (!vtable_arg);
8919 if (!cfg->compile_aot) {
8921 * emit_get_rgctx_method () calls mono_class_vtable () so check
8922 * for type load errors before.
8924 mono_class_setup_vtable (cmethod->klass);
8925 CHECK_TYPELOAD (cmethod->klass);
8928 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8930 /* !marshalbyref is needed to properly handle generic methods + remoting */
8931 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8932 MONO_METHOD_IS_FINAL (cmethod)) &&
8933 !mono_class_is_marshalbyref (cmethod->klass)) {
8940 if (pass_imt_from_rgctx) {
8941 g_assert (!pass_vtable);
8943 imt_arg = emit_get_rgctx_method (cfg, context_used,
8944 cmethod, MONO_RGCTX_INFO_METHOD);
8948 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8950 /* Calling virtual generic methods */
8951 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8952 !(MONO_METHOD_IS_FINAL (cmethod) &&
8953 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8954 fsig->generic_param_count &&
8955 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8957 MonoInst *this_temp, *this_arg_temp, *store;
8958 MonoInst *iargs [4];
8960 g_assert (fsig->is_inflated);
8962 /* Prevent inlining of methods that contain indirect calls */
8963 INLINE_FAILURE ("virtual generic call");
8965 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8966 GSHAREDVT_FAILURE (*ip);
8968 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8969 g_assert (!imt_arg);
8971 g_assert (cmethod->is_inflated);
8972 imt_arg = emit_get_rgctx_method (cfg, context_used,
8973 cmethod, MONO_RGCTX_INFO_METHOD);
8974 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8976 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8977 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8978 MONO_ADD_INS (cfg->cbb, store);
8980 /* FIXME: This should be a managed pointer */
8981 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8983 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8984 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8985 cmethod, MONO_RGCTX_INFO_METHOD);
8986 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8987 addr = mono_emit_jit_icall (cfg,
8988 mono_helper_compile_generic_method, iargs);
8990 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8992 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8999 * Implement a workaround for the inherent races involved in locking:
9005 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9006 * try block, the Exit () won't be executed, see:
9007 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9008 * To work around this, we extend such try blocks to include the last x bytes
9009 * of the Monitor.Enter () call.
9011 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9012 MonoBasicBlock *tbb;
9014 GET_BBLOCK (cfg, tbb, ip + 5);
9016 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9017 * from Monitor.Enter like ArgumentNullException.
9019 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9020 /* Mark this bblock as needing to be extended */
9021 tbb->extend_try_block = TRUE;
9025 /* Conversion to a JIT intrinsic */
9026 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9027 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9028 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9036 if ((cfg->opt & MONO_OPT_INLINE) &&
9037 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9038 mono_method_check_inlining (cfg, cmethod)) {
9040 gboolean always = FALSE;
9042 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9043 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9044 /* Prevent inlining of methods that call wrappers */
9045 INLINE_FAILURE ("wrapper call");
9046 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9050 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9052 cfg->real_offset += 5;
9054 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9055 /* *sp is already set by inline_method */
9060 inline_costs += costs;
9066 /* Tail recursion elimination */
9067 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9068 gboolean has_vtargs = FALSE;
9071 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9072 INLINE_FAILURE ("tail call");
9074 /* keep it simple */
9075 for (i = fsig->param_count - 1; i >= 0; i--) {
9076 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9081 if (need_seq_point) {
9082 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9083 need_seq_point = FALSE;
9085 for (i = 0; i < n; ++i)
9086 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9087 MONO_INST_NEW (cfg, ins, OP_BR);
9088 MONO_ADD_INS (cfg->cbb, ins);
9089 tblock = start_bblock->out_bb [0];
9090 link_bblock (cfg, cfg->cbb, tblock);
9091 ins->inst_target_bb = tblock;
9092 start_new_bblock = 1;
9094 /* skip the CEE_RET, too */
9095 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9102 inline_costs += 10 * num_calls++;
9105 * Synchronized wrappers.
9106 * Its hard to determine where to replace a method with its synchronized
9107 * wrapper without causing an infinite recursion. The current solution is
9108 * to add the synchronized wrapper in the trampolines, and to
9109 * change the called method to a dummy wrapper, and resolve that wrapper
9110 * to the real method in mono_jit_compile_method ().
9112 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9113 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9114 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9115 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9119 * Making generic calls out of gsharedvt methods.
9120 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9121 * patching gshared method addresses into a gsharedvt method.
9123 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9124 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9125 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9126 MonoRgctxInfoType info_type;
9129 //if (mono_class_is_interface (cmethod->klass))
9130 //GSHAREDVT_FAILURE (*ip);
9131 // disable for possible remoting calls
9132 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9133 GSHAREDVT_FAILURE (*ip);
9134 if (fsig->generic_param_count) {
9135 /* virtual generic call */
9136 g_assert (!imt_arg);
9137 /* Same as the virtual generic case above */
9138 imt_arg = emit_get_rgctx_method (cfg, context_used,
9139 cmethod, MONO_RGCTX_INFO_METHOD);
9140 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9142 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9143 /* This can happen when we call a fully instantiated iface method */
9144 imt_arg = emit_get_rgctx_method (cfg, context_used,
9145 cmethod, MONO_RGCTX_INFO_METHOD);
9150 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9151 keep_this_alive = sp [0];
9153 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9154 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9156 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9157 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9159 if (cfg->llvm_only) {
9160 // FIXME: Avoid initializing vtable_arg
9161 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9163 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9168 /* Generic sharing */
9171 * Use this if the callee is gsharedvt sharable too, since
9172 * at runtime we might find an instantiation so the call cannot
9173 * be patched (the 'no_patch' code path in mini-trampolines.c).
9175 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9176 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9177 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9178 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9179 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9180 INLINE_FAILURE ("gshared");
9182 g_assert (cfg->gshared && cmethod);
9186 * We are compiling a call to a
9187 * generic method from shared code,
9188 * which means that we have to look up
9189 * the method in the rgctx and do an
9193 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9195 if (cfg->llvm_only) {
9196 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9197 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9199 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9200 // FIXME: Avoid initializing imt_arg/vtable_arg
9201 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9203 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9204 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9209 /* Direct calls to icalls */
9211 MonoMethod *wrapper;
9214 /* Inline the wrapper */
9215 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9217 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9218 g_assert (costs > 0);
9219 cfg->real_offset += 5;
9221 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9222 /* *sp is already set by inline_method */
9227 inline_costs += costs;
9236 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9237 MonoInst *val = sp [fsig->param_count];
9239 if (val->type == STACK_OBJ) {
9240 MonoInst *iargs [2];
9245 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9248 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9249 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9250 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9251 emit_write_barrier (cfg, addr, val);
9252 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9253 GSHAREDVT_FAILURE (*ip);
9254 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9255 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9258 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9259 if (!cmethod->klass->element_class->valuetype && !readonly)
9260 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9261 CHECK_TYPELOAD (cmethod->klass);
9264 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9267 g_assert_not_reached ();
9274 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9278 /* Tail prefix / tail call optimization */
9280 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9281 /* FIXME: runtime generic context pointer for jumps? */
9282 /* FIXME: handle this for generic sharing eventually */
9283 if ((ins_flag & MONO_INST_TAILCALL) &&
9284 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9285 supported_tail_call = TRUE;
9287 if (supported_tail_call) {
9290 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9291 INLINE_FAILURE ("tail call");
9293 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9295 if (cfg->backend->have_op_tail_call) {
9296 /* Handle tail calls similarly to normal calls */
9299 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9301 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9302 call->tail_call = TRUE;
9303 call->method = cmethod;
9304 call->signature = mono_method_signature (cmethod);
9307 * We implement tail calls by storing the actual arguments into the
9308 * argument variables, then emitting a CEE_JMP.
9310 for (i = 0; i < n; ++i) {
9311 /* Prevent argument from being register allocated */
9312 arg_array [i]->flags |= MONO_INST_VOLATILE;
9313 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9315 ins = (MonoInst*)call;
9316 ins->inst_p0 = cmethod;
9317 ins->inst_p1 = arg_array [0];
9318 MONO_ADD_INS (cfg->cbb, ins);
9319 link_bblock (cfg, cfg->cbb, end_bblock);
9320 start_new_bblock = 1;
9322 // FIXME: Eliminate unreachable epilogs
9325 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9326 * only reachable from this call.
9328 GET_BBLOCK (cfg, tblock, ip + 5);
9329 if (tblock == cfg->cbb || tblock->in_count == 0)
9338 * Virtual calls in llvm-only mode.
9340 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9341 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9346 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9347 INLINE_FAILURE ("call");
9348 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9349 imt_arg, vtable_arg);
9351 if (tail_call && !cfg->llvm_only) {
9352 link_bblock (cfg, cfg->cbb, end_bblock);
9353 start_new_bblock = 1;
9355 // FIXME: Eliminate unreachable epilogs
9358 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9359 * only reachable from this call.
9361 GET_BBLOCK (cfg, tblock, ip + 5);
9362 if (tblock == cfg->cbb || tblock->in_count == 0)
9369 /* End of call, INS should contain the result of the call, if any */
9371 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9374 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9379 if (keep_this_alive) {
9380 MonoInst *dummy_use;
9382 /* See mono_emit_method_call_full () */
9383 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9386 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9388 * Clang can convert these calls to tail calls which screw up the stack
9389 * walk. This happens even when the -fno-optimize-sibling-calls
9390 * option is passed to clang.
9391 * Work around this by emitting a dummy call.
9393 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9396 CHECK_CFG_EXCEPTION;
9400 g_assert (*ip == CEE_RET);
9404 constrained_class = NULL;
9406 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9410 if (cfg->method != method) {
9411 /* return from inlined method */
9413 * If in_count == 0, that means the ret is unreachable due to
9414 * being preceeded by a throw. In that case, inline_method () will
9415 * handle setting the return value
9416 * (test case: test_0_inline_throw ()).
9418 if (return_var && cfg->cbb->in_count) {
9419 MonoType *ret_type = mono_method_signature (method)->ret;
9425 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9428 //g_assert (returnvar != -1);
9429 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9430 cfg->ret_var_set = TRUE;
9433 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9435 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9439 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9441 if (seq_points && !sym_seq_points) {
9443 * Place a seq point here too even through the IL stack is not
9444 * empty, so a step over on
9447 * will work correctly.
9449 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9450 MONO_ADD_INS (cfg->cbb, ins);
9453 g_assert (!return_var);
9457 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9460 emit_setret (cfg, *sp);
9463 if (sp != stack_start)
9465 MONO_INST_NEW (cfg, ins, OP_BR);
9467 ins->inst_target_bb = end_bblock;
9468 MONO_ADD_INS (cfg->cbb, ins);
9469 link_bblock (cfg, cfg->cbb, end_bblock);
9470 start_new_bblock = 1;
9474 MONO_INST_NEW (cfg, ins, OP_BR);
9476 target = ip + 1 + (signed char)(*ip);
9478 GET_BBLOCK (cfg, tblock, target);
9479 link_bblock (cfg, cfg->cbb, tblock);
9480 ins->inst_target_bb = tblock;
9481 if (sp != stack_start) {
9482 handle_stack_args (cfg, stack_start, sp - stack_start);
9484 CHECK_UNVERIFIABLE (cfg);
9486 MONO_ADD_INS (cfg->cbb, ins);
9487 start_new_bblock = 1;
9488 inline_costs += BRANCH_COST;
9502 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9504 target = ip + 1 + *(signed char*)ip;
9510 inline_costs += BRANCH_COST;
9514 MONO_INST_NEW (cfg, ins, OP_BR);
9517 target = ip + 4 + (gint32)read32(ip);
9519 GET_BBLOCK (cfg, tblock, target);
9520 link_bblock (cfg, cfg->cbb, tblock);
9521 ins->inst_target_bb = tblock;
9522 if (sp != stack_start) {
9523 handle_stack_args (cfg, stack_start, sp - stack_start);
9525 CHECK_UNVERIFIABLE (cfg);
9528 MONO_ADD_INS (cfg->cbb, ins);
9530 start_new_bblock = 1;
9531 inline_costs += BRANCH_COST;
9538 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9539 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9540 guint32 opsize = is_short ? 1 : 4;
9542 CHECK_OPSIZE (opsize);
9544 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9547 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9552 GET_BBLOCK (cfg, tblock, target);
9553 link_bblock (cfg, cfg->cbb, tblock);
9554 GET_BBLOCK (cfg, tblock, ip);
9555 link_bblock (cfg, cfg->cbb, tblock);
9557 if (sp != stack_start) {
9558 handle_stack_args (cfg, stack_start, sp - stack_start);
9559 CHECK_UNVERIFIABLE (cfg);
9562 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9563 cmp->sreg1 = sp [0]->dreg;
9564 type_from_op (cfg, cmp, sp [0], NULL);
9567 #if SIZEOF_REGISTER == 4
9568 if (cmp->opcode == OP_LCOMPARE_IMM) {
9569 /* Convert it to OP_LCOMPARE */
9570 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9571 ins->type = STACK_I8;
9572 ins->dreg = alloc_dreg (cfg, STACK_I8);
9574 MONO_ADD_INS (cfg->cbb, ins);
9575 cmp->opcode = OP_LCOMPARE;
9576 cmp->sreg2 = ins->dreg;
9579 MONO_ADD_INS (cfg->cbb, cmp);
9581 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9582 type_from_op (cfg, ins, sp [0], NULL);
9583 MONO_ADD_INS (cfg->cbb, ins);
9584 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9585 GET_BBLOCK (cfg, tblock, target);
9586 ins->inst_true_bb = tblock;
9587 GET_BBLOCK (cfg, tblock, ip);
9588 ins->inst_false_bb = tblock;
9589 start_new_bblock = 2;
9592 inline_costs += BRANCH_COST;
9607 MONO_INST_NEW (cfg, ins, *ip);
9609 target = ip + 4 + (gint32)read32(ip);
9615 inline_costs += BRANCH_COST;
9619 MonoBasicBlock **targets;
9620 MonoBasicBlock *default_bblock;
9621 MonoJumpInfoBBTable *table;
9622 int offset_reg = alloc_preg (cfg);
9623 int target_reg = alloc_preg (cfg);
9624 int table_reg = alloc_preg (cfg);
9625 int sum_reg = alloc_preg (cfg);
9626 gboolean use_op_switch;
9630 n = read32 (ip + 1);
9633 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9637 CHECK_OPSIZE (n * sizeof (guint32));
9638 target = ip + n * sizeof (guint32);
9640 GET_BBLOCK (cfg, default_bblock, target);
9641 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9643 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9644 for (i = 0; i < n; ++i) {
9645 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9646 targets [i] = tblock;
9647 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9651 if (sp != stack_start) {
9653 * Link the current bb with the targets as well, so handle_stack_args
9654 * will set their in_stack correctly.
9656 link_bblock (cfg, cfg->cbb, default_bblock);
9657 for (i = 0; i < n; ++i)
9658 link_bblock (cfg, cfg->cbb, targets [i]);
9660 handle_stack_args (cfg, stack_start, sp - stack_start);
9662 CHECK_UNVERIFIABLE (cfg);
9664 /* Undo the links */
9665 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9666 for (i = 0; i < n; ++i)
9667 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9673 for (i = 0; i < n; ++i)
9674 link_bblock (cfg, cfg->cbb, targets [i]);
9676 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9677 table->table = targets;
9678 table->table_size = n;
9680 use_op_switch = FALSE;
9682 /* ARM implements SWITCH statements differently */
9683 /* FIXME: Make it use the generic implementation */
9684 if (!cfg->compile_aot)
9685 use_op_switch = TRUE;
9688 if (COMPILE_LLVM (cfg))
9689 use_op_switch = TRUE;
9691 cfg->cbb->has_jump_table = 1;
9693 if (use_op_switch) {
9694 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9695 ins->sreg1 = src1->dreg;
9696 ins->inst_p0 = table;
9697 ins->inst_many_bb = targets;
9698 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9699 MONO_ADD_INS (cfg->cbb, ins);
9701 if (sizeof (gpointer) == 8)
9702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9706 #if SIZEOF_REGISTER == 8
9707 /* The upper word might not be zero, and we add it to a 64 bit address later */
9708 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9711 if (cfg->compile_aot) {
9712 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9714 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9715 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9716 ins->inst_p0 = table;
9717 ins->dreg = table_reg;
9718 MONO_ADD_INS (cfg->cbb, ins);
9721 /* FIXME: Use load_memindex */
9722 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9724 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9726 start_new_bblock = 1;
9727 inline_costs += (BRANCH_COST * 2);
9747 dreg = alloc_freg (cfg);
9750 dreg = alloc_lreg (cfg);
9753 dreg = alloc_ireg_ref (cfg);
9756 dreg = alloc_preg (cfg);
9759 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9760 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9761 if (*ip == CEE_LDIND_R4)
9762 ins->type = cfg->r4_stack_type;
9763 ins->flags |= ins_flag;
9764 MONO_ADD_INS (cfg->cbb, ins);
9766 if (ins_flag & MONO_INST_VOLATILE) {
9767 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9768 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9784 if (ins_flag & MONO_INST_VOLATILE) {
9785 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9786 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9789 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9790 ins->flags |= ins_flag;
9793 MONO_ADD_INS (cfg->cbb, ins);
9795 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9796 emit_write_barrier (cfg, sp [0], sp [1]);
9805 MONO_INST_NEW (cfg, ins, (*ip));
9807 ins->sreg1 = sp [0]->dreg;
9808 ins->sreg2 = sp [1]->dreg;
9809 type_from_op (cfg, ins, sp [0], sp [1]);
9811 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9813 /* Use the immediate opcodes if possible */
9814 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9815 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9816 if (imm_opcode != -1) {
9817 ins->opcode = imm_opcode;
9818 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9821 NULLIFY_INS (sp [1]);
9825 MONO_ADD_INS ((cfg)->cbb, (ins));
9827 *sp++ = mono_decompose_opcode (cfg, ins);
9844 MONO_INST_NEW (cfg, ins, (*ip));
9846 ins->sreg1 = sp [0]->dreg;
9847 ins->sreg2 = sp [1]->dreg;
9848 type_from_op (cfg, ins, sp [0], sp [1]);
9850 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9851 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9853 /* FIXME: Pass opcode to is_inst_imm */
9855 /* Use the immediate opcodes if possible */
9856 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9857 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9858 if (imm_opcode != -1) {
9859 ins->opcode = imm_opcode;
9860 if (sp [1]->opcode == OP_I8CONST) {
9861 #if SIZEOF_REGISTER == 8
9862 ins->inst_imm = sp [1]->inst_l;
9864 ins->inst_ls_word = sp [1]->inst_ls_word;
9865 ins->inst_ms_word = sp [1]->inst_ms_word;
9869 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9872 /* Might be followed by an instruction added by add_widen_op */
9873 if (sp [1]->next == NULL)
9874 NULLIFY_INS (sp [1]);
9877 MONO_ADD_INS ((cfg)->cbb, (ins));
9879 *sp++ = mono_decompose_opcode (cfg, ins);
9892 case CEE_CONV_OVF_I8:
9893 case CEE_CONV_OVF_U8:
9897 /* Special case this earlier so we have long constants in the IR */
9898 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9899 int data = sp [-1]->inst_c0;
9900 sp [-1]->opcode = OP_I8CONST;
9901 sp [-1]->type = STACK_I8;
9902 #if SIZEOF_REGISTER == 8
9903 if ((*ip) == CEE_CONV_U8)
9904 sp [-1]->inst_c0 = (guint32)data;
9906 sp [-1]->inst_c0 = data;
9908 sp [-1]->inst_ls_word = data;
9909 if ((*ip) == CEE_CONV_U8)
9910 sp [-1]->inst_ms_word = 0;
9912 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9914 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9921 case CEE_CONV_OVF_I4:
9922 case CEE_CONV_OVF_I1:
9923 case CEE_CONV_OVF_I2:
9924 case CEE_CONV_OVF_I:
9925 case CEE_CONV_OVF_U:
9928 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9929 ADD_UNOP (CEE_CONV_OVF_I8);
9936 case CEE_CONV_OVF_U1:
9937 case CEE_CONV_OVF_U2:
9938 case CEE_CONV_OVF_U4:
9941 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9942 ADD_UNOP (CEE_CONV_OVF_U8);
9949 case CEE_CONV_OVF_I1_UN:
9950 case CEE_CONV_OVF_I2_UN:
9951 case CEE_CONV_OVF_I4_UN:
9952 case CEE_CONV_OVF_I8_UN:
9953 case CEE_CONV_OVF_U1_UN:
9954 case CEE_CONV_OVF_U2_UN:
9955 case CEE_CONV_OVF_U4_UN:
9956 case CEE_CONV_OVF_U8_UN:
9957 case CEE_CONV_OVF_I_UN:
9958 case CEE_CONV_OVF_U_UN:
9965 CHECK_CFG_EXCEPTION;
9969 case CEE_ADD_OVF_UN:
9971 case CEE_MUL_OVF_UN:
9973 case CEE_SUB_OVF_UN:
9979 GSHAREDVT_FAILURE (*ip);
9982 token = read32 (ip + 1);
9983 klass = mini_get_class (method, token, generic_context);
9984 CHECK_TYPELOAD (klass);
9986 if (generic_class_is_reference_type (cfg, klass)) {
9987 MonoInst *store, *load;
9988 int dreg = alloc_ireg_ref (cfg);
9990 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9991 load->flags |= ins_flag;
9992 MONO_ADD_INS (cfg->cbb, load);
9994 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9995 store->flags |= ins_flag;
9996 MONO_ADD_INS (cfg->cbb, store);
9998 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9999 emit_write_barrier (cfg, sp [0], sp [1]);
10001 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10007 int loc_index = -1;
10013 token = read32 (ip + 1);
10014 klass = mini_get_class (method, token, generic_context);
10015 CHECK_TYPELOAD (klass);
10017 /* Optimize the common ldobj+stloc combination */
10020 loc_index = ip [6];
10027 loc_index = ip [5] - CEE_STLOC_0;
10034 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10035 CHECK_LOCAL (loc_index);
10037 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10038 ins->dreg = cfg->locals [loc_index]->dreg;
10039 ins->flags |= ins_flag;
10042 if (ins_flag & MONO_INST_VOLATILE) {
10043 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10044 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10050 /* Optimize the ldobj+stobj combination */
10051 /* The reference case ends up being a load+store anyway */
10052 /* Skip this if the operation is volatile. */
10053 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10058 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10065 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10066 ins->flags |= ins_flag;
10069 if (ins_flag & MONO_INST_VOLATILE) {
10070 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10071 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10080 CHECK_STACK_OVF (1);
10082 n = read32 (ip + 1);
10084 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10085 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10086 ins->type = STACK_OBJ;
10089 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10090 MonoInst *iargs [1];
10091 char *str = (char *)mono_method_get_wrapper_data (method, n);
10093 if (cfg->compile_aot)
10094 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10096 EMIT_NEW_PCONST (cfg, iargs [0], str);
10097 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10099 if (cfg->opt & MONO_OPT_SHARED) {
10100 MonoInst *iargs [3];
10102 if (cfg->compile_aot) {
10103 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10105 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10106 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10107 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10108 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10109 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10112 if (cfg->cbb->out_of_line) {
10113 MonoInst *iargs [2];
10115 if (image == mono_defaults.corlib) {
10117 * Avoid relocations in AOT and save some space by using a
10118 * version of helper_ldstr specialized to mscorlib.
10120 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10121 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10123 /* Avoid creating the string object */
10124 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10125 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10126 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10130 if (cfg->compile_aot) {
10131 NEW_LDSTRCONST (cfg, ins, image, n);
10133 MONO_ADD_INS (cfg->cbb, ins);
10136 NEW_PCONST (cfg, ins, NULL);
10137 ins->type = STACK_OBJ;
10138 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10142 OUT_OF_MEMORY_FAILURE;
10145 MONO_ADD_INS (cfg->cbb, ins);
10154 MonoInst *iargs [2];
10155 MonoMethodSignature *fsig;
10158 MonoInst *vtable_arg = NULL;
10161 token = read32 (ip + 1);
10162 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10165 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10168 mono_save_token_info (cfg, image, token, cmethod);
10170 if (!mono_class_init (cmethod->klass))
10171 TYPE_LOAD_ERROR (cmethod->klass);
10173 context_used = mini_method_check_context_used (cfg, cmethod);
10175 if (mono_security_core_clr_enabled ())
10176 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10178 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10179 emit_class_init (cfg, cmethod->klass);
10180 CHECK_TYPELOAD (cmethod->klass);
10184 if (cfg->gsharedvt) {
10185 if (mini_is_gsharedvt_variable_signature (sig))
10186 GSHAREDVT_FAILURE (*ip);
10190 n = fsig->param_count;
10194 * Generate smaller code for the common newobj <exception> instruction in
10195 * argument checking code.
10197 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10198 is_exception_class (cmethod->klass) && n <= 2 &&
10199 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10200 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10201 MonoInst *iargs [3];
10205 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10208 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10211 iargs [1] = sp [0];
10212 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10215 iargs [1] = sp [0];
10216 iargs [2] = sp [1];
10217 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10220 g_assert_not_reached ();
10228 /* move the args to allow room for 'this' in the first position */
10234 /* check_call_signature () requires sp[0] to be set */
10235 this_ins.type = STACK_OBJ;
10236 sp [0] = &this_ins;
10237 if (check_call_signature (cfg, fsig, sp))
10242 if (mini_class_is_system_array (cmethod->klass)) {
10243 *sp = emit_get_rgctx_method (cfg, context_used,
10244 cmethod, MONO_RGCTX_INFO_METHOD);
10246 /* Avoid varargs in the common case */
10247 if (fsig->param_count == 1)
10248 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10249 else if (fsig->param_count == 2)
10250 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10251 else if (fsig->param_count == 3)
10252 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10253 else if (fsig->param_count == 4)
10254 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10256 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10257 } else if (cmethod->string_ctor) {
10258 g_assert (!context_used);
10259 g_assert (!vtable_arg);
10260 /* we simply pass a null pointer */
10261 EMIT_NEW_PCONST (cfg, *sp, NULL);
10262 /* now call the string ctor */
10263 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10265 if (cmethod->klass->valuetype) {
10266 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10267 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10268 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10273 * The code generated by mini_emit_virtual_call () expects
10274 * iargs [0] to be a boxed instance, but luckily the vcall
10275 * will be transformed into a normal call there.
10277 } else if (context_used) {
10278 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10281 MonoVTable *vtable = NULL;
10283 if (!cfg->compile_aot)
10284 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10285 CHECK_TYPELOAD (cmethod->klass);
10288 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10289 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10290 * As a workaround, we call class cctors before allocating objects.
10292 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10293 emit_class_init (cfg, cmethod->klass);
10294 if (cfg->verbose_level > 2)
10295 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10296 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10299 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10302 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10305 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10307 /* Now call the actual ctor */
10308 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10309 CHECK_CFG_EXCEPTION;
10312 if (alloc == NULL) {
10314 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10315 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10323 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10324 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10327 case CEE_CASTCLASS:
10332 token = read32 (ip + 1);
10333 klass = mini_get_class (method, token, generic_context);
10334 CHECK_TYPELOAD (klass);
10335 if (sp [0]->type != STACK_OBJ)
10338 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10339 ins->dreg = alloc_preg (cfg);
10340 ins->sreg1 = (*sp)->dreg;
10341 ins->klass = klass;
10342 ins->type = STACK_OBJ;
10343 MONO_ADD_INS (cfg->cbb, ins);
10345 CHECK_CFG_EXCEPTION;
10349 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10352 case CEE_UNBOX_ANY: {
10353 MonoInst *res, *addr;
10358 token = read32 (ip + 1);
10359 klass = mini_get_class (method, token, generic_context);
10360 CHECK_TYPELOAD (klass);
10362 mono_save_token_info (cfg, image, token, klass);
10364 context_used = mini_class_check_context_used (cfg, klass);
10366 if (mini_is_gsharedvt_klass (klass)) {
10367 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10369 } else if (generic_class_is_reference_type (cfg, klass)) {
10370 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10371 EMIT_NEW_PCONST (cfg, res, NULL);
10372 res->type = STACK_OBJ;
10374 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10375 res->dreg = alloc_preg (cfg);
10376 res->sreg1 = (*sp)->dreg;
10377 res->klass = klass;
10378 res->type = STACK_OBJ;
10379 MONO_ADD_INS (cfg->cbb, res);
10380 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10382 } else if (mono_class_is_nullable (klass)) {
10383 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10385 addr = handle_unbox (cfg, klass, sp, context_used);
10387 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10398 MonoClass *enum_class;
10399 MonoMethod *has_flag;
10405 token = read32 (ip + 1);
10406 klass = mini_get_class (method, token, generic_context);
10407 CHECK_TYPELOAD (klass);
10409 mono_save_token_info (cfg, image, token, klass);
10411 context_used = mini_class_check_context_used (cfg, klass);
10413 if (generic_class_is_reference_type (cfg, klass)) {
10419 if (klass == mono_defaults.void_class)
10421 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10423 /* frequent check in generic code: box (struct), brtrue */
10428 * <push int/long ptr>
10431 * constrained. MyFlags
10432 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10434 * If we find this sequence and the operand types on box and constrained
10435 * are equal, we can emit a specialized instruction sequence instead of
10436 * the very slow HasFlag () call.
10438 if ((cfg->opt & MONO_OPT_INTRINS) &&
10439 /* Cheap checks first. */
10440 ip + 5 + 6 + 5 < end &&
10441 ip [5] == CEE_PREFIX1 &&
10442 ip [6] == CEE_CONSTRAINED_ &&
10443 ip [11] == CEE_CALLVIRT &&
10444 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10445 mono_class_is_enum (klass) &&
10446 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10447 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10448 has_flag->klass == mono_defaults.enum_class &&
10449 !strcmp (has_flag->name, "HasFlag") &&
10450 has_flag->signature->hasthis &&
10451 has_flag->signature->param_count == 1) {
10452 CHECK_TYPELOAD (enum_class);
10454 if (enum_class == klass) {
10455 MonoInst *enum_this, *enum_flag;
10460 enum_this = sp [0];
10461 enum_flag = sp [1];
10463 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10468 // FIXME: LLVM can't handle the inconsistent bb linking
10469 if (!mono_class_is_nullable (klass) &&
10470 !mini_is_gsharedvt_klass (klass) &&
10471 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10472 (ip [5] == CEE_BRTRUE ||
10473 ip [5] == CEE_BRTRUE_S ||
10474 ip [5] == CEE_BRFALSE ||
10475 ip [5] == CEE_BRFALSE_S)) {
10476 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10478 MonoBasicBlock *true_bb, *false_bb;
10482 if (cfg->verbose_level > 3) {
10483 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10484 printf ("<box+brtrue opt>\n");
10489 case CEE_BRFALSE_S:
10492 target = ip + 1 + (signed char)(*ip);
10499 target = ip + 4 + (gint)(read32 (ip));
10503 g_assert_not_reached ();
10507 * We need to link both bblocks, since it is needed for handling stack
10508 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10509 * Branching to only one of them would lead to inconsistencies, so
10510 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10512 GET_BBLOCK (cfg, true_bb, target);
10513 GET_BBLOCK (cfg, false_bb, ip);
10515 mono_link_bblock (cfg, cfg->cbb, true_bb);
10516 mono_link_bblock (cfg, cfg->cbb, false_bb);
10518 if (sp != stack_start) {
10519 handle_stack_args (cfg, stack_start, sp - stack_start);
10521 CHECK_UNVERIFIABLE (cfg);
10524 if (COMPILE_LLVM (cfg)) {
10525 dreg = alloc_ireg (cfg);
10526 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10529 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10531 /* The JIT can't eliminate the iconst+compare */
10532 MONO_INST_NEW (cfg, ins, OP_BR);
10533 ins->inst_target_bb = is_true ? true_bb : false_bb;
10534 MONO_ADD_INS (cfg->cbb, ins);
10537 start_new_bblock = 1;
10541 *sp++ = handle_box (cfg, val, klass, context_used);
10543 CHECK_CFG_EXCEPTION;
10552 token = read32 (ip + 1);
10553 klass = mini_get_class (method, token, generic_context);
10554 CHECK_TYPELOAD (klass);
10556 mono_save_token_info (cfg, image, token, klass);
10558 context_used = mini_class_check_context_used (cfg, klass);
10560 if (mono_class_is_nullable (klass)) {
10563 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10564 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10568 ins = handle_unbox (cfg, klass, sp, context_used);
10581 MonoClassField *field;
10582 #ifndef DISABLE_REMOTING
10586 gboolean is_instance;
10588 gpointer addr = NULL;
10589 gboolean is_special_static;
10591 MonoInst *store_val = NULL;
10592 MonoInst *thread_ins;
10595 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10597 if (op == CEE_STFLD) {
10600 store_val = sp [1];
10605 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10607 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10610 if (op == CEE_STSFLD) {
10613 store_val = sp [0];
10618 token = read32 (ip + 1);
10619 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10620 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10621 klass = field->parent;
10624 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10627 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10628 FIELD_ACCESS_FAILURE (method, field);
10629 mono_class_init (klass);
10631 /* if the class is Critical then transparent code cannot access its fields */
10632 if (!is_instance && mono_security_core_clr_enabled ())
10633 ensure_method_is_allowed_to_access_field (cfg, method, field);
10635 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10636 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10637 if (mono_security_core_clr_enabled ())
10638 ensure_method_is_allowed_to_access_field (cfg, method, field);
10641 ftype = mono_field_get_type (field);
10644 * LDFLD etc. is usable on static fields as well, so convert those cases to
10647 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10659 g_assert_not_reached ();
10661 is_instance = FALSE;
10664 context_used = mini_class_check_context_used (cfg, klass);
10666 /* INSTANCE CASE */
10668 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10669 if (op == CEE_STFLD) {
10670 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10672 #ifndef DISABLE_REMOTING
10673 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10674 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10675 MonoInst *iargs [5];
10677 GSHAREDVT_FAILURE (op);
10679 iargs [0] = sp [0];
10680 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10681 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10682 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10684 iargs [4] = sp [1];
10686 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10687 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10688 iargs, ip, cfg->real_offset, TRUE);
10689 CHECK_CFG_EXCEPTION;
10690 g_assert (costs > 0);
10692 cfg->real_offset += 5;
10694 inline_costs += costs;
10696 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10701 MonoInst *store, *wbarrier_ptr_ins = NULL;
10703 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10705 if (ins_flag & MONO_INST_VOLATILE) {
10706 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10707 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10710 if (mini_is_gsharedvt_klass (klass)) {
10711 MonoInst *offset_ins;
10713 context_used = mini_class_check_context_used (cfg, klass);
10715 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10716 /* The value is offset by 1 */
10717 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10718 dreg = alloc_ireg_mp (cfg);
10719 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10720 wbarrier_ptr_ins = ins;
10721 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10722 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10724 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10726 if (sp [0]->opcode != OP_LDADDR)
10727 store->flags |= MONO_INST_FAULT;
10729 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10730 if (mini_is_gsharedvt_klass (klass)) {
10731 g_assert (wbarrier_ptr_ins);
10732 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10734 /* insert call to write barrier */
10738 dreg = alloc_ireg_mp (cfg);
10739 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10740 emit_write_barrier (cfg, ptr, sp [1]);
10744 store->flags |= ins_flag;
10751 #ifndef DISABLE_REMOTING
10752 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10753 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10754 MonoInst *iargs [4];
10756 GSHAREDVT_FAILURE (op);
10758 iargs [0] = sp [0];
10759 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10760 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10761 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10762 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10763 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10764 iargs, ip, cfg->real_offset, TRUE);
10765 CHECK_CFG_EXCEPTION;
10766 g_assert (costs > 0);
10768 cfg->real_offset += 5;
10772 inline_costs += costs;
10774 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10780 if (sp [0]->type == STACK_VTYPE) {
10783 /* Have to compute the address of the variable */
10785 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10787 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10789 g_assert (var->klass == klass);
10791 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10795 if (op == CEE_LDFLDA) {
10796 if (sp [0]->type == STACK_OBJ) {
10797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10798 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10801 dreg = alloc_ireg_mp (cfg);
10803 if (mini_is_gsharedvt_klass (klass)) {
10804 MonoInst *offset_ins;
10806 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10807 /* The value is offset by 1 */
10808 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10809 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10811 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10813 ins->klass = mono_class_from_mono_type (field->type);
10814 ins->type = STACK_MP;
10819 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10821 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10822 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10831 if (mini_is_gsharedvt_klass (klass)) {
10832 MonoInst *offset_ins;
10834 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10835 /* The value is offset by 1 */
10836 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10837 dreg = alloc_ireg_mp (cfg);
10838 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10839 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10841 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10843 load->flags |= ins_flag;
10844 if (sp [0]->opcode != OP_LDADDR)
10845 load->flags |= MONO_INST_FAULT;
10857 context_used = mini_class_check_context_used (cfg, klass);
10859 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10860 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10864 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10865 * to be called here.
10867 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10868 mono_class_vtable (cfg->domain, klass);
10869 CHECK_TYPELOAD (klass);
10871 mono_domain_lock (cfg->domain);
10872 if (cfg->domain->special_static_fields)
10873 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10874 mono_domain_unlock (cfg->domain);
10876 is_special_static = mono_class_field_is_special_static (field);
10878 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10879 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10883 /* Generate IR to compute the field address */
10884 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10886 * Fast access to TLS data
10887 * Inline version of get_thread_static_data () in
10891 int idx, static_data_reg, array_reg, dreg;
10893 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10894 GSHAREDVT_FAILURE (op);
10896 static_data_reg = alloc_ireg (cfg);
10897 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10899 if (cfg->compile_aot) {
10900 int offset_reg, offset2_reg, idx_reg;
10902 /* For TLS variables, this will return the TLS offset */
10903 EMIT_NEW_SFLDACONST (cfg, ins, field);
10904 offset_reg = ins->dreg;
10905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10906 idx_reg = alloc_ireg (cfg);
10907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10909 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10910 array_reg = alloc_ireg (cfg);
10911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10912 offset2_reg = alloc_ireg (cfg);
10913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10915 dreg = alloc_ireg (cfg);
10916 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10918 offset = (gsize)addr & 0x7fffffff;
10919 idx = offset & 0x3f;
10921 array_reg = alloc_ireg (cfg);
10922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10923 dreg = alloc_ireg (cfg);
10924 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10926 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10927 (cfg->compile_aot && is_special_static) ||
10928 (context_used && is_special_static)) {
10929 MonoInst *iargs [2];
10931 g_assert (field->parent);
10932 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10933 if (context_used) {
10934 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10935 field, MONO_RGCTX_INFO_CLASS_FIELD);
10937 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10939 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10940 } else if (context_used) {
10941 MonoInst *static_data;
10944 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10945 method->klass->name_space, method->klass->name, method->name,
10946 depth, field->offset);
10949 if (mono_class_needs_cctor_run (klass, method))
10950 emit_class_init (cfg, klass);
10953 * The pointer we're computing here is
10955 * super_info.static_data + field->offset
10957 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10958 klass, MONO_RGCTX_INFO_STATIC_DATA);
10960 if (mini_is_gsharedvt_klass (klass)) {
10961 MonoInst *offset_ins;
10963 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10964 /* The value is offset by 1 */
10965 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10966 dreg = alloc_ireg_mp (cfg);
10967 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10968 } else if (field->offset == 0) {
10971 int addr_reg = mono_alloc_preg (cfg);
10972 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10974 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10975 MonoInst *iargs [2];
10977 g_assert (field->parent);
10978 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10979 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10980 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10982 MonoVTable *vtable = NULL;
10984 if (!cfg->compile_aot)
10985 vtable = mono_class_vtable (cfg->domain, klass);
10986 CHECK_TYPELOAD (klass);
10989 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10990 if (!(g_slist_find (class_inits, klass))) {
10991 emit_class_init (cfg, klass);
10992 if (cfg->verbose_level > 2)
10993 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10994 class_inits = g_slist_prepend (class_inits, klass);
10997 if (cfg->run_cctors) {
10998 /* This makes so that inline cannot trigger */
10999 /* .cctors: too many apps depend on them */
11000 /* running with a specific order... */
11002 if (! vtable->initialized)
11003 INLINE_FAILURE ("class init");
11004 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11005 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11006 goto exception_exit;
11010 if (cfg->compile_aot)
11011 EMIT_NEW_SFLDACONST (cfg, ins, field);
11014 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11016 EMIT_NEW_PCONST (cfg, ins, addr);
11019 MonoInst *iargs [1];
11020 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11021 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11025 /* Generate IR to do the actual load/store operation */
11027 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11028 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11029 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11032 if (op == CEE_LDSFLDA) {
11033 ins->klass = mono_class_from_mono_type (ftype);
11034 ins->type = STACK_PTR;
11036 } else if (op == CEE_STSFLD) {
11039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11040 store->flags |= ins_flag;
11042 gboolean is_const = FALSE;
11043 MonoVTable *vtable = NULL;
11044 gpointer addr = NULL;
11046 if (!context_used) {
11047 vtable = mono_class_vtable (cfg->domain, klass);
11048 CHECK_TYPELOAD (klass);
11050 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11051 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11052 int ro_type = ftype->type;
11054 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11055 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11056 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11059 GSHAREDVT_FAILURE (op);
11061 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11064 case MONO_TYPE_BOOLEAN:
11066 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11070 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11073 case MONO_TYPE_CHAR:
11075 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11079 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11084 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11088 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11093 case MONO_TYPE_PTR:
11094 case MONO_TYPE_FNPTR:
11095 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11096 type_to_eval_stack_type ((cfg), field->type, *sp);
11099 case MONO_TYPE_STRING:
11100 case MONO_TYPE_OBJECT:
11101 case MONO_TYPE_CLASS:
11102 case MONO_TYPE_SZARRAY:
11103 case MONO_TYPE_ARRAY:
11104 if (!mono_gc_is_moving ()) {
11105 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11106 type_to_eval_stack_type ((cfg), field->type, *sp);
11114 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11119 case MONO_TYPE_VALUETYPE:
11129 CHECK_STACK_OVF (1);
11131 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11132 load->flags |= ins_flag;
11138 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11139 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11140 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11151 token = read32 (ip + 1);
11152 klass = mini_get_class (method, token, generic_context);
11153 CHECK_TYPELOAD (klass);
11154 if (ins_flag & MONO_INST_VOLATILE) {
11155 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11156 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11158 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11159 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11160 ins->flags |= ins_flag;
11161 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11162 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11163 /* insert call to write barrier */
11164 emit_write_barrier (cfg, sp [0], sp [1]);
11176 const char *data_ptr;
11178 guint32 field_token;
11184 token = read32 (ip + 1);
11186 klass = mini_get_class (method, token, generic_context);
11187 CHECK_TYPELOAD (klass);
11189 context_used = mini_class_check_context_used (cfg, klass);
11191 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11192 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11193 ins->sreg1 = sp [0]->dreg;
11194 ins->type = STACK_I4;
11195 ins->dreg = alloc_ireg (cfg);
11196 MONO_ADD_INS (cfg->cbb, ins);
11197 *sp = mono_decompose_opcode (cfg, ins);
11200 if (context_used) {
11201 MonoInst *args [3];
11202 MonoClass *array_class = mono_array_class_get (klass, 1);
11203 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11205 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11208 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11209 array_class, MONO_RGCTX_INFO_VTABLE);
11214 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11216 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11218 if (cfg->opt & MONO_OPT_SHARED) {
11219 /* Decompose now to avoid problems with references to the domainvar */
11220 MonoInst *iargs [3];
11222 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11223 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11224 iargs [2] = sp [0];
11226 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11228 /* Decompose later since it is needed by abcrem */
11229 MonoClass *array_type = mono_array_class_get (klass, 1);
11230 mono_class_vtable (cfg->domain, array_type);
11231 CHECK_TYPELOAD (array_type);
11233 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11234 ins->dreg = alloc_ireg_ref (cfg);
11235 ins->sreg1 = sp [0]->dreg;
11236 ins->inst_newa_class = klass;
11237 ins->type = STACK_OBJ;
11238 ins->klass = array_type;
11239 MONO_ADD_INS (cfg->cbb, ins);
11240 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11241 cfg->cbb->has_array_access = TRUE;
11243 /* Needed so mono_emit_load_get_addr () gets called */
11244 mono_get_got_var (cfg);
11254 * we inline/optimize the initialization sequence if possible.
11255 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11256 * for small sizes open code the memcpy
11257 * ensure the rva field is big enough
11259 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11260 MonoMethod *memcpy_method = get_memcpy_method ();
11261 MonoInst *iargs [3];
11262 int add_reg = alloc_ireg_mp (cfg);
11264 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11265 if (cfg->compile_aot) {
11266 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11268 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11270 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11271 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11280 if (sp [0]->type != STACK_OBJ)
11283 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11284 ins->dreg = alloc_preg (cfg);
11285 ins->sreg1 = sp [0]->dreg;
11286 ins->type = STACK_I4;
11287 /* This flag will be inherited by the decomposition */
11288 ins->flags |= MONO_INST_FAULT;
11289 MONO_ADD_INS (cfg->cbb, ins);
11290 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11291 cfg->cbb->has_array_access = TRUE;
11299 if (sp [0]->type != STACK_OBJ)
11302 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11304 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11305 CHECK_TYPELOAD (klass);
11306 /* we need to make sure that this array is exactly the type it needs
11307 * to be for correctness. the wrappers are lax with their usage
11308 * so we need to ignore them here
11310 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11311 MonoClass *array_class = mono_array_class_get (klass, 1);
11312 mini_emit_check_array_type (cfg, sp [0], array_class);
11313 CHECK_TYPELOAD (array_class);
11317 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11322 case CEE_LDELEM_I1:
11323 case CEE_LDELEM_U1:
11324 case CEE_LDELEM_I2:
11325 case CEE_LDELEM_U2:
11326 case CEE_LDELEM_I4:
11327 case CEE_LDELEM_U4:
11328 case CEE_LDELEM_I8:
11330 case CEE_LDELEM_R4:
11331 case CEE_LDELEM_R8:
11332 case CEE_LDELEM_REF: {
11338 if (*ip == CEE_LDELEM) {
11340 token = read32 (ip + 1);
11341 klass = mini_get_class (method, token, generic_context);
11342 CHECK_TYPELOAD (klass);
11343 mono_class_init (klass);
11346 klass = array_access_to_klass (*ip);
11348 if (sp [0]->type != STACK_OBJ)
11351 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11353 if (mini_is_gsharedvt_variable_klass (klass)) {
11354 // FIXME-VT: OP_ICONST optimization
11355 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11356 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11357 ins->opcode = OP_LOADV_MEMBASE;
11358 } else if (sp [1]->opcode == OP_ICONST) {
11359 int array_reg = sp [0]->dreg;
11360 int index_reg = sp [1]->dreg;
11361 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11363 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11364 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11366 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11369 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11373 if (*ip == CEE_LDELEM)
11380 case CEE_STELEM_I1:
11381 case CEE_STELEM_I2:
11382 case CEE_STELEM_I4:
11383 case CEE_STELEM_I8:
11384 case CEE_STELEM_R4:
11385 case CEE_STELEM_R8:
11386 case CEE_STELEM_REF:
11391 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11393 if (*ip == CEE_STELEM) {
11395 token = read32 (ip + 1);
11396 klass = mini_get_class (method, token, generic_context);
11397 CHECK_TYPELOAD (klass);
11398 mono_class_init (klass);
11401 klass = array_access_to_klass (*ip);
11403 if (sp [0]->type != STACK_OBJ)
11406 emit_array_store (cfg, klass, sp, TRUE);
11408 if (*ip == CEE_STELEM)
11415 case CEE_CKFINITE: {
11419 if (cfg->llvm_only) {
11420 MonoInst *iargs [1];
11422 iargs [0] = sp [0];
11423 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11425 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11426 ins->sreg1 = sp [0]->dreg;
11427 ins->dreg = alloc_freg (cfg);
11428 ins->type = STACK_R8;
11429 MONO_ADD_INS (cfg->cbb, ins);
11431 *sp++ = mono_decompose_opcode (cfg, ins);
11437 case CEE_REFANYVAL: {
11438 MonoInst *src_var, *src;
11440 int klass_reg = alloc_preg (cfg);
11441 int dreg = alloc_preg (cfg);
11443 GSHAREDVT_FAILURE (*ip);
11446 MONO_INST_NEW (cfg, ins, *ip);
11449 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11450 CHECK_TYPELOAD (klass);
11452 context_used = mini_class_check_context_used (cfg, klass);
11455 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11457 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11458 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11461 if (context_used) {
11462 MonoInst *klass_ins;
11464 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11465 klass, MONO_RGCTX_INFO_KLASS);
11468 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11469 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11471 mini_emit_class_check (cfg, klass_reg, klass);
11473 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11474 ins->type = STACK_MP;
11475 ins->klass = klass;
11480 case CEE_MKREFANY: {
11481 MonoInst *loc, *addr;
11483 GSHAREDVT_FAILURE (*ip);
11486 MONO_INST_NEW (cfg, ins, *ip);
11489 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11490 CHECK_TYPELOAD (klass);
11492 context_used = mini_class_check_context_used (cfg, klass);
11494 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11495 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11497 if (context_used) {
11498 MonoInst *const_ins;
11499 int type_reg = alloc_preg (cfg);
11501 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11502 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11504 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11506 int const_reg = alloc_preg (cfg);
11507 int type_reg = alloc_preg (cfg);
11509 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11510 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11512 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11516 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11517 ins->type = STACK_VTYPE;
11518 ins->klass = mono_defaults.typed_reference_class;
11523 case CEE_LDTOKEN: {
11525 MonoClass *handle_class;
11527 CHECK_STACK_OVF (1);
11530 n = read32 (ip + 1);
11532 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11533 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11534 handle = mono_method_get_wrapper_data (method, n);
11535 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11536 if (handle_class == mono_defaults.typehandle_class)
11537 handle = &((MonoClass*)handle)->byval_arg;
11540 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11545 mono_class_init (handle_class);
11546 if (cfg->gshared) {
11547 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11548 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11549 /* This case handles ldtoken
11550 of an open type, like for
11553 } else if (handle_class == mono_defaults.typehandle_class) {
11554 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11555 } else if (handle_class == mono_defaults.fieldhandle_class)
11556 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11557 else if (handle_class == mono_defaults.methodhandle_class)
11558 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11560 g_assert_not_reached ();
11563 if ((cfg->opt & MONO_OPT_SHARED) &&
11564 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11565 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11566 MonoInst *addr, *vtvar, *iargs [3];
11567 int method_context_used;
11569 method_context_used = mini_method_check_context_used (cfg, method);
11571 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11573 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11574 EMIT_NEW_ICONST (cfg, iargs [1], n);
11575 if (method_context_used) {
11576 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11577 method, MONO_RGCTX_INFO_METHOD);
11578 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11580 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11581 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11583 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11585 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11587 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11589 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11590 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11591 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11592 (cmethod->klass == mono_defaults.systemtype_class) &&
11593 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11594 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11596 mono_class_init (tclass);
11597 if (context_used) {
11598 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11599 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11600 } else if (cfg->compile_aot) {
11601 if (method->wrapper_type) {
11602 error_init (&error); //got to do it since there are multiple conditionals below
11603 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11604 /* Special case for static synchronized wrappers */
11605 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11607 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11608 /* FIXME: n is not a normal token */
11610 EMIT_NEW_PCONST (cfg, ins, NULL);
11613 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11616 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11618 EMIT_NEW_PCONST (cfg, ins, rt);
11620 ins->type = STACK_OBJ;
11621 ins->klass = cmethod->klass;
11624 MonoInst *addr, *vtvar;
11626 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11628 if (context_used) {
11629 if (handle_class == mono_defaults.typehandle_class) {
11630 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11631 mono_class_from_mono_type ((MonoType *)handle),
11632 MONO_RGCTX_INFO_TYPE);
11633 } else if (handle_class == mono_defaults.methodhandle_class) {
11634 ins = emit_get_rgctx_method (cfg, context_used,
11635 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11636 } else if (handle_class == mono_defaults.fieldhandle_class) {
11637 ins = emit_get_rgctx_field (cfg, context_used,
11638 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11640 g_assert_not_reached ();
11642 } else if (cfg->compile_aot) {
11643 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11645 EMIT_NEW_PCONST (cfg, ins, handle);
11647 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11648 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11649 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11659 if (sp [-1]->type != STACK_OBJ)
11662 MONO_INST_NEW (cfg, ins, OP_THROW);
11664 ins->sreg1 = sp [0]->dreg;
11666 cfg->cbb->out_of_line = TRUE;
11667 MONO_ADD_INS (cfg->cbb, ins);
11668 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11669 MONO_ADD_INS (cfg->cbb, ins);
11672 link_bblock (cfg, cfg->cbb, end_bblock);
11673 start_new_bblock = 1;
11674 /* This can complicate code generation for llvm since the return value might not be defined */
11675 if (COMPILE_LLVM (cfg))
11676 INLINE_FAILURE ("throw");
11678 case CEE_ENDFINALLY:
11679 if (!ip_in_finally_clause (cfg, ip - header->code))
11681 /* mono_save_seq_point_info () depends on this */
11682 if (sp != stack_start)
11683 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11684 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11685 MONO_ADD_INS (cfg->cbb, ins);
11687 start_new_bblock = 1;
11690 * Control will leave the method so empty the stack, otherwise
11691 * the next basic block will start with a nonempty stack.
11693 while (sp != stack_start) {
11698 case CEE_LEAVE_S: {
11701 if (*ip == CEE_LEAVE) {
11703 target = ip + 5 + (gint32)read32(ip + 1);
11706 target = ip + 2 + (signed char)(ip [1]);
11709 /* empty the stack */
11710 while (sp != stack_start) {
11715 * If this leave statement is in a catch block, check for a
11716 * pending exception, and rethrow it if necessary.
11717 * We avoid doing this in runtime invoke wrappers, since those are called
11718 * by native code which expects the wrapper to catch all exceptions.
11720 for (i = 0; i < header->num_clauses; ++i) {
11721 MonoExceptionClause *clause = &header->clauses [i];
11724 * Use <= in the final comparison to handle clauses with multiple
11725 * leave statements, like in bug #78024.
11726 * The ordering of the exception clauses guarantees that we find the
11727 * innermost clause.
11729 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11731 MonoBasicBlock *dont_throw;
11736 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11739 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11741 NEW_BBLOCK (cfg, dont_throw);
11744 * Currently, we always rethrow the abort exception, despite the
11745 * fact that this is not correct. See thread6.cs for an example.
11746 * But propagating the abort exception is more important than
11747 * getting the semantics right.
11749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11750 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11751 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11753 MONO_START_BB (cfg, dont_throw);
11758 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11761 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11763 MonoExceptionClause *clause;
11765 for (tmp = handlers; tmp; tmp = tmp->next) {
11766 clause = (MonoExceptionClause *)tmp->data;
11767 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11769 link_bblock (cfg, cfg->cbb, tblock);
11770 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11771 ins->inst_target_bb = tblock;
11772 ins->inst_eh_block = clause;
11773 MONO_ADD_INS (cfg->cbb, ins);
11774 cfg->cbb->has_call_handler = 1;
11775 if (COMPILE_LLVM (cfg)) {
11776 MonoBasicBlock *target_bb;
11779 * Link the finally bblock with the target, since it will
11780 * conceptually branch there.
11782 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11783 GET_BBLOCK (cfg, target_bb, target);
11784 link_bblock (cfg, tblock, target_bb);
11787 g_list_free (handlers);
11790 MONO_INST_NEW (cfg, ins, OP_BR);
11791 MONO_ADD_INS (cfg->cbb, ins);
11792 GET_BBLOCK (cfg, tblock, target);
11793 link_bblock (cfg, cfg->cbb, tblock);
11794 ins->inst_target_bb = tblock;
11796 start_new_bblock = 1;
11798 if (*ip == CEE_LEAVE)
11807 * Mono specific opcodes
11809 case MONO_CUSTOM_PREFIX: {
11811 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11815 case CEE_MONO_ICALL: {
11817 MonoJitICallInfo *info;
11819 token = read32 (ip + 2);
11820 func = mono_method_get_wrapper_data (method, token);
11821 info = mono_find_jit_icall_by_addr (func);
11823 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11826 CHECK_STACK (info->sig->param_count);
11827 sp -= info->sig->param_count;
11829 ins = mono_emit_jit_icall (cfg, info->func, sp);
11830 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11834 inline_costs += 10 * num_calls++;
11838 case CEE_MONO_LDPTR_CARD_TABLE:
11839 case CEE_MONO_LDPTR_NURSERY_START:
11840 case CEE_MONO_LDPTR_NURSERY_BITS:
11841 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11842 CHECK_STACK_OVF (1);
11845 case CEE_MONO_LDPTR_CARD_TABLE:
11846 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11848 case CEE_MONO_LDPTR_NURSERY_START:
11849 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11851 case CEE_MONO_LDPTR_NURSERY_BITS:
11852 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11854 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11855 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11861 inline_costs += 10 * num_calls++;
11864 case CEE_MONO_LDPTR: {
11867 CHECK_STACK_OVF (1);
11869 token = read32 (ip + 2);
11871 ptr = mono_method_get_wrapper_data (method, token);
11872 EMIT_NEW_PCONST (cfg, ins, ptr);
11875 inline_costs += 10 * num_calls++;
11876 /* Can't embed random pointers into AOT code */
11880 case CEE_MONO_JIT_ICALL_ADDR: {
11881 MonoJitICallInfo *callinfo;
11884 CHECK_STACK_OVF (1);
11886 token = read32 (ip + 2);
11888 ptr = mono_method_get_wrapper_data (method, token);
11889 callinfo = mono_find_jit_icall_by_addr (ptr);
11890 g_assert (callinfo);
11891 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11894 inline_costs += 10 * num_calls++;
11897 case CEE_MONO_ICALL_ADDR: {
11898 MonoMethod *cmethod;
11901 CHECK_STACK_OVF (1);
11903 token = read32 (ip + 2);
11905 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11907 if (cfg->compile_aot) {
11908 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11910 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11911 * before the call, it's not needed when using direct pinvoke.
11912 * This is not an optimization, but it's used to avoid looking up pinvokes
11913 * on platforms which don't support dlopen ().
11915 EMIT_NEW_PCONST (cfg, ins, NULL);
11917 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11920 ptr = mono_lookup_internal_call (cmethod);
11922 EMIT_NEW_PCONST (cfg, ins, ptr);
11928 case CEE_MONO_VTADDR: {
11929 MonoInst *src_var, *src;
11935 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11936 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11941 case CEE_MONO_NEWOBJ: {
11942 MonoInst *iargs [2];
11944 CHECK_STACK_OVF (1);
11946 token = read32 (ip + 2);
11947 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11948 mono_class_init (klass);
11949 NEW_DOMAINCONST (cfg, iargs [0]);
11950 MONO_ADD_INS (cfg->cbb, iargs [0]);
11951 NEW_CLASSCONST (cfg, iargs [1], klass);
11952 MONO_ADD_INS (cfg->cbb, iargs [1]);
11953 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11955 inline_costs += 10 * num_calls++;
11958 case CEE_MONO_OBJADDR:
11961 MONO_INST_NEW (cfg, ins, OP_MOVE);
11962 ins->dreg = alloc_ireg_mp (cfg);
11963 ins->sreg1 = sp [0]->dreg;
11964 ins->type = STACK_MP;
11965 MONO_ADD_INS (cfg->cbb, ins);
11969 case CEE_MONO_LDNATIVEOBJ:
11971 * Similar to LDOBJ, but instead load the unmanaged
11972 * representation of the vtype to the stack.
11977 token = read32 (ip + 2);
11978 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11979 g_assert (klass->valuetype);
11980 mono_class_init (klass);
11983 MonoInst *src, *dest, *temp;
11986 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11987 temp->backend.is_pinvoke = 1;
11988 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11989 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11991 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11992 dest->type = STACK_VTYPE;
11993 dest->klass = klass;
11999 case CEE_MONO_RETOBJ: {
12001 * Same as RET, but return the native representation of a vtype
12004 g_assert (cfg->ret);
12005 g_assert (mono_method_signature (method)->pinvoke);
12010 token = read32 (ip + 2);
12011 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12013 if (!cfg->vret_addr) {
12014 g_assert (cfg->ret_var_is_local);
12016 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12018 EMIT_NEW_RETLOADA (cfg, ins);
12020 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12022 if (sp != stack_start)
12025 MONO_INST_NEW (cfg, ins, OP_BR);
12026 ins->inst_target_bb = end_bblock;
12027 MONO_ADD_INS (cfg->cbb, ins);
12028 link_bblock (cfg, cfg->cbb, end_bblock);
12029 start_new_bblock = 1;
12033 case CEE_MONO_SAVE_LMF:
12034 case CEE_MONO_RESTORE_LMF:
12037 case CEE_MONO_CLASSCONST:
12038 CHECK_STACK_OVF (1);
12040 token = read32 (ip + 2);
12041 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12044 inline_costs += 10 * num_calls++;
12046 case CEE_MONO_NOT_TAKEN:
12047 cfg->cbb->out_of_line = TRUE;
12050 case CEE_MONO_TLS: {
12053 CHECK_STACK_OVF (1);
12055 key = (MonoTlsKey)read32 (ip + 2);
12056 g_assert (key < TLS_KEY_NUM);
12058 ins = mono_create_tls_get (cfg, key);
12060 ins->type = STACK_PTR;
12065 case CEE_MONO_DYN_CALL: {
12066 MonoCallInst *call;
12068 /* It would be easier to call a trampoline, but that would put an
12069 * extra frame on the stack, confusing exception handling. So
12070 * implement it inline using an opcode for now.
12073 if (!cfg->dyn_call_var) {
12074 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12075 /* prevent it from being register allocated */
12076 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12079 /* Has to use a call inst since the local regalloc expects it */
12080 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12081 ins = (MonoInst*)call;
12083 ins->sreg1 = sp [0]->dreg;
12084 ins->sreg2 = sp [1]->dreg;
12085 MONO_ADD_INS (cfg->cbb, ins);
12087 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12090 inline_costs += 10 * num_calls++;
12094 case CEE_MONO_MEMORY_BARRIER: {
12096 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12100 case CEE_MONO_ATOMIC_STORE_I4: {
12101 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12107 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12108 ins->dreg = sp [0]->dreg;
12109 ins->sreg1 = sp [1]->dreg;
12110 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12111 MONO_ADD_INS (cfg->cbb, ins);
12116 case CEE_MONO_JIT_ATTACH: {
12117 MonoInst *args [16], *domain_ins;
12118 MonoInst *ad_ins, *jit_tls_ins;
12119 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12121 g_assert (!mono_threads_is_coop_enabled ());
12123 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12125 EMIT_NEW_PCONST (cfg, ins, NULL);
12126 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12128 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12129 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12131 if (ad_ins && jit_tls_ins) {
12132 NEW_BBLOCK (cfg, next_bb);
12133 NEW_BBLOCK (cfg, call_bb);
12135 if (cfg->compile_aot) {
12136 /* AOT code is only used in the root domain */
12137 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12139 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12141 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12148 MONO_START_BB (cfg, call_bb);
12151 /* AOT code is only used in the root domain */
12152 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12153 if (cfg->compile_aot) {
12157 * This is called on unattached threads, so it cannot go through the trampoline
12158 * infrastructure. Use an indirect call through a got slot initialized at load time
12161 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12162 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12164 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12166 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12169 MONO_START_BB (cfg, next_bb);
12174 case CEE_MONO_JIT_DETACH: {
12175 MonoInst *args [16];
12177 /* Restore the original domain */
12178 dreg = alloc_ireg (cfg);
12179 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12180 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12184 case CEE_MONO_CALLI_EXTRA_ARG: {
12186 MonoMethodSignature *fsig;
12190 * This is the same as CEE_CALLI, but passes an additional argument
12191 * to the called method in llvmonly mode.
12192 * This is only used by delegate invoke wrappers to call the
12193 * actual delegate method.
12195 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12198 token = read32 (ip + 2);
12206 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12209 if (cfg->llvm_only)
12210 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12212 n = fsig->param_count + fsig->hasthis + 1;
12219 if (cfg->llvm_only) {
12221 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12222 * cconv. This is set by mono_init_delegate ().
12224 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12225 MonoInst *callee = addr;
12226 MonoInst *call, *localloc_ins;
12227 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12228 int low_bit_reg = alloc_preg (cfg);
12230 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12231 NEW_BBLOCK (cfg, end_bb);
12233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12237 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12238 addr = emit_get_rgctx_sig (cfg, context_used,
12239 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12241 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12243 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12244 ins->dreg = alloc_preg (cfg);
12245 ins->inst_imm = 2 * SIZEOF_VOID_P;
12246 MONO_ADD_INS (cfg->cbb, ins);
12247 localloc_ins = ins;
12248 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12249 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12250 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12252 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12255 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12256 MONO_START_BB (cfg, is_gsharedvt_bb);
12257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12258 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12259 ins->dreg = call->dreg;
12261 MONO_START_BB (cfg, end_bb);
12263 /* Caller uses a normal calling conv */
12265 MonoInst *callee = addr;
12266 MonoInst *call, *localloc_ins;
12267 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12268 int low_bit_reg = alloc_preg (cfg);
12270 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12271 NEW_BBLOCK (cfg, end_bb);
12273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12275 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12277 /* Normal case: callee uses a normal cconv, no conversion is needed */
12278 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12279 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12280 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12281 MONO_START_BB (cfg, is_gsharedvt_bb);
12282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12283 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12284 MONO_ADD_INS (cfg->cbb, addr);
12286 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12288 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12289 ins->dreg = alloc_preg (cfg);
12290 ins->inst_imm = 2 * SIZEOF_VOID_P;
12291 MONO_ADD_INS (cfg->cbb, ins);
12292 localloc_ins = ins;
12293 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12294 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12297 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12298 ins->dreg = call->dreg;
12299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12301 MONO_START_BB (cfg, end_bb);
12304 /* Same as CEE_CALLI */
12305 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12307 * We pass the address to the gsharedvt trampoline in the rgctx reg
12309 MonoInst *callee = addr;
12311 addr = emit_get_rgctx_sig (cfg, context_used,
12312 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12313 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12315 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12319 if (!MONO_TYPE_IS_VOID (fsig->ret))
12320 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12322 CHECK_CFG_EXCEPTION;
12326 constrained_class = NULL;
12329 case CEE_MONO_LDDOMAIN:
12330 CHECK_STACK_OVF (1);
12331 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12335 case CEE_MONO_GET_LAST_ERROR:
12337 CHECK_STACK_OVF (1);
12339 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12340 ins->dreg = alloc_dreg (cfg, STACK_I4);
12341 ins->type = STACK_I4;
12342 MONO_ADD_INS (cfg->cbb, ins);
12348 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12354 case CEE_PREFIX1: {
12357 case CEE_ARGLIST: {
12358 /* somewhat similar to LDTOKEN */
12359 MonoInst *addr, *vtvar;
12360 CHECK_STACK_OVF (1);
12361 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12363 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12364 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12366 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12367 ins->type = STACK_VTYPE;
12368 ins->klass = mono_defaults.argumenthandle_class;
12378 MonoInst *cmp, *arg1, *arg2;
12386 * The following transforms:
12387 * CEE_CEQ into OP_CEQ
12388 * CEE_CGT into OP_CGT
12389 * CEE_CGT_UN into OP_CGT_UN
12390 * CEE_CLT into OP_CLT
12391 * CEE_CLT_UN into OP_CLT_UN
12393 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12395 MONO_INST_NEW (cfg, ins, cmp->opcode);
12396 cmp->sreg1 = arg1->dreg;
12397 cmp->sreg2 = arg2->dreg;
12398 type_from_op (cfg, cmp, arg1, arg2);
12400 add_widen_op (cfg, cmp, &arg1, &arg2);
12401 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12402 cmp->opcode = OP_LCOMPARE;
12403 else if (arg1->type == STACK_R4)
12404 cmp->opcode = OP_RCOMPARE;
12405 else if (arg1->type == STACK_R8)
12406 cmp->opcode = OP_FCOMPARE;
12408 cmp->opcode = OP_ICOMPARE;
12409 MONO_ADD_INS (cfg->cbb, cmp);
12410 ins->type = STACK_I4;
12411 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12412 type_from_op (cfg, ins, arg1, arg2);
12414 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12416 * The backends expect the fceq opcodes to do the
12419 ins->sreg1 = cmp->sreg1;
12420 ins->sreg2 = cmp->sreg2;
12423 MONO_ADD_INS (cfg->cbb, ins);
12429 MonoInst *argconst;
12430 MonoMethod *cil_method;
12432 CHECK_STACK_OVF (1);
12434 n = read32 (ip + 2);
12435 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12438 mono_class_init (cmethod->klass);
12440 mono_save_token_info (cfg, image, n, cmethod);
12442 context_used = mini_method_check_context_used (cfg, cmethod);
12444 cil_method = cmethod;
12445 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12446 emit_method_access_failure (cfg, method, cil_method);
12448 if (mono_security_core_clr_enabled ())
12449 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12452 * Optimize the common case of ldftn+delegate creation
12454 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12455 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12456 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12457 MonoInst *target_ins, *handle_ins;
12458 MonoMethod *invoke;
12459 int invoke_context_used;
12461 invoke = mono_get_delegate_invoke (ctor_method->klass);
12462 if (!invoke || !mono_method_signature (invoke))
12465 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12467 target_ins = sp [-1];
12469 if (mono_security_core_clr_enabled ())
12470 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12472 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12473 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12474 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12476 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12480 /* FIXME: SGEN support */
12481 if (invoke_context_used == 0 || cfg->llvm_only) {
12483 if (cfg->verbose_level > 3)
12484 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12485 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12488 CHECK_CFG_EXCEPTION;
12498 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12499 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12503 inline_costs += 10 * num_calls++;
12506 case CEE_LDVIRTFTN: {
12507 MonoInst *args [2];
12511 n = read32 (ip + 2);
12512 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12515 mono_class_init (cmethod->klass);
12517 context_used = mini_method_check_context_used (cfg, cmethod);
12519 if (mono_security_core_clr_enabled ())
12520 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12523 * Optimize the common case of ldvirtftn+delegate creation
12525 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12526 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12527 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12528 MonoInst *target_ins, *handle_ins;
12529 MonoMethod *invoke;
12530 int invoke_context_used;
12531 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12533 invoke = mono_get_delegate_invoke (ctor_method->klass);
12534 if (!invoke || !mono_method_signature (invoke))
12537 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12539 target_ins = sp [-1];
12541 if (mono_security_core_clr_enabled ())
12542 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12544 /* FIXME: SGEN support */
12545 if (invoke_context_used == 0 || cfg->llvm_only) {
12547 if (cfg->verbose_level > 3)
12548 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12549 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12552 CHECK_CFG_EXCEPTION;
12565 args [1] = emit_get_rgctx_method (cfg, context_used,
12566 cmethod, MONO_RGCTX_INFO_METHOD);
12569 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12571 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12574 inline_costs += 10 * num_calls++;
12578 CHECK_STACK_OVF (1);
12580 n = read16 (ip + 2);
12582 EMIT_NEW_ARGLOAD (cfg, ins, n);
12587 CHECK_STACK_OVF (1);
12589 n = read16 (ip + 2);
12591 NEW_ARGLOADA (cfg, ins, n);
12592 MONO_ADD_INS (cfg->cbb, ins);
12600 n = read16 (ip + 2);
12602 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12604 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12608 CHECK_STACK_OVF (1);
12610 n = read16 (ip + 2);
12612 EMIT_NEW_LOCLOAD (cfg, ins, n);
12617 unsigned char *tmp_ip;
12618 CHECK_STACK_OVF (1);
12620 n = read16 (ip + 2);
12623 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12629 EMIT_NEW_LOCLOADA (cfg, ins, n);
12638 n = read16 (ip + 2);
12640 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12642 emit_stloc_ir (cfg, sp, header, n);
12646 case CEE_LOCALLOC: {
12648 MonoBasicBlock *non_zero_bb, *end_bb;
12649 int alloc_ptr = alloc_preg (cfg);
12651 if (sp != stack_start)
12653 if (cfg->method != method)
12655 * Inlining this into a loop in a parent could lead to
12656 * stack overflows which is different behavior than the
12657 * non-inlined case, thus disable inlining in this case.
12659 INLINE_FAILURE("localloc");
12661 NEW_BBLOCK (cfg, non_zero_bb);
12662 NEW_BBLOCK (cfg, end_bb);
12664 /* if size != zero */
12665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12668 //size is zero, so result is NULL
12669 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12672 MONO_START_BB (cfg, non_zero_bb);
12673 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12674 ins->dreg = alloc_ptr;
12675 ins->sreg1 = sp [0]->dreg;
12676 ins->type = STACK_PTR;
12677 MONO_ADD_INS (cfg->cbb, ins);
12679 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12681 ins->flags |= MONO_INST_INIT;
12683 MONO_START_BB (cfg, end_bb);
12684 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12685 ins->type = STACK_PTR;
12691 case CEE_ENDFILTER: {
12692 MonoExceptionClause *clause, *nearest;
12697 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12699 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12700 ins->sreg1 = (*sp)->dreg;
12701 MONO_ADD_INS (cfg->cbb, ins);
12702 start_new_bblock = 1;
12706 for (cc = 0; cc < header->num_clauses; ++cc) {
12707 clause = &header->clauses [cc];
12708 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12709 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12710 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12713 g_assert (nearest);
12714 if ((ip - header->code) != nearest->handler_offset)
12719 case CEE_UNALIGNED_:
12720 ins_flag |= MONO_INST_UNALIGNED;
12721 /* FIXME: record alignment? we can assume 1 for now */
12725 case CEE_VOLATILE_:
12726 ins_flag |= MONO_INST_VOLATILE;
12730 ins_flag |= MONO_INST_TAILCALL;
12731 cfg->flags |= MONO_CFG_HAS_TAIL;
12732 /* Can't inline tail calls at this time */
12733 inline_costs += 100000;
12740 token = read32 (ip + 2);
12741 klass = mini_get_class (method, token, generic_context);
12742 CHECK_TYPELOAD (klass);
12743 if (generic_class_is_reference_type (cfg, klass))
12744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12746 mini_emit_initobj (cfg, *sp, NULL, klass);
12750 case CEE_CONSTRAINED_:
12752 token = read32 (ip + 2);
12753 constrained_class = mini_get_class (method, token, generic_context);
12754 CHECK_TYPELOAD (constrained_class);
12758 case CEE_INITBLK: {
12759 MonoInst *iargs [3];
12763 /* Skip optimized paths for volatile operations. */
12764 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12765 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12766 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12767 /* emit_memset only works when val == 0 */
12768 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12771 iargs [0] = sp [0];
12772 iargs [1] = sp [1];
12773 iargs [2] = sp [2];
12774 if (ip [1] == CEE_CPBLK) {
12776 * FIXME: It's unclear whether we should be emitting both the acquire
12777 * and release barriers for cpblk. It is technically both a load and
12778 * store operation, so it seems like that's the sensible thing to do.
12780 * FIXME: We emit full barriers on both sides of the operation for
12781 * simplicity. We should have a separate atomic memcpy method instead.
12783 MonoMethod *memcpy_method = get_memcpy_method ();
12785 if (ins_flag & MONO_INST_VOLATILE)
12786 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12788 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12789 call->flags |= ins_flag;
12791 if (ins_flag & MONO_INST_VOLATILE)
12792 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12794 MonoMethod *memset_method = get_memset_method ();
12795 if (ins_flag & MONO_INST_VOLATILE) {
12796 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12797 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12799 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12800 call->flags |= ins_flag;
12811 ins_flag |= MONO_INST_NOTYPECHECK;
12813 ins_flag |= MONO_INST_NORANGECHECK;
12814 /* we ignore the no-nullcheck for now since we
12815 * really do it explicitly only when doing callvirt->call
12819 case CEE_RETHROW: {
12821 int handler_offset = -1;
12823 for (i = 0; i < header->num_clauses; ++i) {
12824 MonoExceptionClause *clause = &header->clauses [i];
12825 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12826 handler_offset = clause->handler_offset;
12831 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12833 if (handler_offset == -1)
12836 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12837 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12838 ins->sreg1 = load->dreg;
12839 MONO_ADD_INS (cfg->cbb, ins);
12841 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12842 MONO_ADD_INS (cfg->cbb, ins);
12845 link_bblock (cfg, cfg->cbb, end_bblock);
12846 start_new_bblock = 1;
12854 CHECK_STACK_OVF (1);
12856 token = read32 (ip + 2);
12857 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12858 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12861 val = mono_type_size (type, &ialign);
12863 MonoClass *klass = mini_get_class (method, token, generic_context);
12864 CHECK_TYPELOAD (klass);
12866 val = mono_type_size (&klass->byval_arg, &ialign);
12868 if (mini_is_gsharedvt_klass (klass))
12869 GSHAREDVT_FAILURE (*ip);
12871 EMIT_NEW_ICONST (cfg, ins, val);
12876 case CEE_REFANYTYPE: {
12877 MonoInst *src_var, *src;
12879 GSHAREDVT_FAILURE (*ip);
12885 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12887 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12888 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12889 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12894 case CEE_READONLY_:
12907 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12917 g_warning ("opcode 0x%02x not handled", *ip);
12921 if (start_new_bblock != 1)
12924 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12925 if (cfg->cbb->next_bb) {
12926 /* This could already be set because of inlining, #693905 */
12927 MonoBasicBlock *bb = cfg->cbb;
12929 while (bb->next_bb)
12931 bb->next_bb = end_bblock;
12933 cfg->cbb->next_bb = end_bblock;
12936 if (cfg->method == method && cfg->domainvar) {
12938 MonoInst *get_domain;
12940 cfg->cbb = init_localsbb;
12942 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12943 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12944 MONO_ADD_INS (cfg->cbb, store);
12947 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12948 if (cfg->compile_aot)
12949 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12950 mono_get_got_var (cfg);
12953 if (cfg->method == method && cfg->got_var)
12954 mono_emit_load_got_addr (cfg);
12956 if (init_localsbb) {
12957 cfg->cbb = init_localsbb;
12959 for (i = 0; i < header->num_locals; ++i) {
12960 emit_init_local (cfg, i, header->locals [i], init_locals);
12964 if (cfg->init_ref_vars && cfg->method == method) {
12965 /* Emit initialization for ref vars */
12966 // FIXME: Avoid duplication initialization for IL locals.
12967 for (i = 0; i < cfg->num_varinfo; ++i) {
12968 MonoInst *ins = cfg->varinfo [i];
12970 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12971 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12975 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12976 cfg->cbb = init_localsbb;
12977 emit_push_lmf (cfg);
12980 cfg->cbb = init_localsbb;
12981 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12984 MonoBasicBlock *bb;
12987 * Make seq points at backward branch targets interruptable.
12989 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12990 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12991 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12994 /* Add a sequence point for method entry/exit events */
12995 if (seq_points && cfg->gen_sdb_seq_points) {
12996 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12997 MONO_ADD_INS (init_localsbb, ins);
12998 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12999 MONO_ADD_INS (cfg->bb_exit, ins);
13003 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13004 * the code they refer to was dead (#11880).
13006 if (sym_seq_points) {
13007 for (i = 0; i < header->code_size; ++i) {
13008 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13011 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13012 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13019 if (cfg->method == method) {
13020 MonoBasicBlock *bb;
13021 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13022 if (bb == cfg->bb_init)
13025 bb->region = mono_find_block_region (cfg, bb->real_offset);
13027 mono_create_spvar_for_region (cfg, bb->region);
13028 if (cfg->verbose_level > 2)
13029 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13032 MonoBasicBlock *bb;
13033 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13034 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13035 bb->real_offset = inline_offset;
13039 if (inline_costs < 0) {
13042 /* Method is too large */
13043 mname = mono_method_full_name (method, TRUE);
13044 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13048 if ((cfg->verbose_level > 2) && (cfg->method == method))
13049 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13054 g_assert (!mono_error_ok (&cfg->error));
13058 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13062 set_exception_type_from_invalid_il (cfg, method, ip);
13066 g_slist_free (class_inits);
13067 mono_basic_block_free (original_bb);
13068 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13069 if (cfg->exception_type)
13072 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the corresponding *_MEMBASE_IMM
 * opcode, so an immediate value can be stored directly without first
 * being loaded into a register. Opcodes with no immediate form assert.
 */
13076 store_membase_reg_to_store_membase_imm (int opcode)
13079 case OP_STORE_MEMBASE_REG:
13080 return OP_STORE_MEMBASE_IMM;
13081 case OP_STOREI1_MEMBASE_REG:
13082 return OP_STOREI1_MEMBASE_IMM;
13083 case OP_STOREI2_MEMBASE_REG:
13084 return OP_STOREI2_MEMBASE_IMM;
13085 case OP_STOREI4_MEMBASE_REG:
13086 return OP_STOREI4_MEMBASE_IMM;
13087 case OP_STOREI8_MEMBASE_REG:
13088 return OP_STOREI8_MEMBASE_IMM;
/* store opcode without an immediate variant — caller must not pass these */
13090 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate form
 * (e.g. OP_IADD -> OP_IADD_IMM) so a constant operand can be folded
 * into the instruction. Covers 32 bit and 64 bit ALU ops, shifts,
 * compares, membase stores and a few arch-specific opcodes.
 * (NOTE(review): the `case` labels for most entries are not visible in
 * this listing — the mapping is inferred from the returned opcodes.)
 */
13097 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
13101 return OP_IADD_IMM;
13103 return OP_ISUB_IMM;
13105 return OP_IDIV_IMM;
13107 return OP_IDIV_UN_IMM;
13109 return OP_IREM_IMM;
13111 return OP_IREM_UN_IMM;
13113 return OP_IMUL_IMM;
13115 return OP_IAND_IMM;
13119 return OP_IXOR_IMM;
/* 32 bit shifts */
13121 return OP_ISHL_IMM;
13123 return OP_ISHR_IMM;
13125 return OP_ISHR_UN_IMM;
/* 64 bit ALU ops and shifts */
13128 return OP_LADD_IMM;
13130 return OP_LSUB_IMM;
13132 return OP_LAND_IMM;
13136 return OP_LXOR_IMM;
13138 return OP_LSHL_IMM;
13140 return OP_LSHR_IMM;
13142 return OP_LSHR_UN_IMM;
/* 64 bit rem has an imm form only on 64 bit registers (no emulation needed) */
13143 #if SIZEOF_REGISTER == 8
13145 return OP_LREM_IMM;
/* compares */
13149 return OP_COMPARE_IMM;
13151 return OP_ICOMPARE_IMM;
13153 return OP_LCOMPARE_IMM;
/* membase stores of an immediate */
13155 case OP_STORE_MEMBASE_REG:
13156 return OP_STORE_MEMBASE_IMM;
13157 case OP_STOREI1_MEMBASE_REG:
13158 return OP_STOREI1_MEMBASE_IMM;
13159 case OP_STOREI2_MEMBASE_REG:
13160 return OP_STOREI2_MEMBASE_IMM;
13161 case OP_STOREI4_MEMBASE_REG:
13162 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes */
13164 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13166 return OP_X86_PUSH_IMM;
13167 case OP_X86_COMPARE_MEMBASE_REG:
13168 return OP_X86_COMPARE_MEMBASE_IMM;
13170 #if defined(TARGET_AMD64)
13171 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13172 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13174 case OP_VOIDCALL_REG:
13175 return OP_VOIDCALL;
13183 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the JIT OP_LOAD*_MEMBASE opcode with the
 * matching size/signedness. LDIND_I and LDIND_REF both become the
 * pointer-sized OP_LOAD_MEMBASE. Asserts on anything else.
 */
13190 ldind_to_load_membase (int opcode)
13194 return OP_LOADI1_MEMBASE;
13196 return OP_LOADU1_MEMBASE;
13198 return OP_LOADI2_MEMBASE;
13200 return OP_LOADU2_MEMBASE;
13202 return OP_LOADI4_MEMBASE;
13204 return OP_LOADU4_MEMBASE;
13206 return OP_LOAD_MEMBASE;
13207 case CEE_LDIND_REF:
/* object references load as a full native word, same as LDIND_I */
13208 return OP_LOAD_MEMBASE;
13210 return OP_LOADI8_MEMBASE;
13212 return OP_LOADR4_MEMBASE;
13214 return OP_LOADR8_MEMBASE;
13216 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the JIT OP_STORE*_MEMBASE_REG opcode with
 * the matching size. STIND_REF stores a full native word. Asserts on
 * anything else.
 */
13223 stind_to_store_membase (int opcode)
13227 return OP_STOREI1_MEMBASE_REG;
13229 return OP_STOREI2_MEMBASE_REG;
13231 return OP_STOREI4_MEMBASE_REG;
13233 case CEE_STIND_REF:
13234 return OP_STORE_MEMBASE_REG;
13236 return OP_STOREI8_MEMBASE_REG;
13238 return OP_STORER4_MEMBASE_REG;
13240 return OP_STORER8_MEMBASE_REG;
13242 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a basereg+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * form (OP_LOAD*_MEM) on architectures that support it. Currently only
 * x86/amd64; elsewhere no conversion is performed.
 */
13249 mono_load_membase_to_load_mem (int opcode)
13251 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13252 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13254 case OP_LOAD_MEMBASE:
13255 return OP_LOAD_MEM;
13256 case OP_LOADU1_MEMBASE:
13257 return OP_LOADU1_MEM;
13258 case OP_LOADU2_MEMBASE:
13259 return OP_LOADU2_MEM;
13260 case OP_LOADI4_MEMBASE:
13261 return OP_LOADI4_MEM;
13262 case OP_LOADU4_MEMBASE:
13263 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit registers */
13264 #if SIZEOF_REGISTER == 8
13265 case OP_LOADI8_MEMBASE:
13266 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU opcode whose result is immediately stored back to memory
 * into a single read-modify-write memory instruction (x86/amd64 only).
 * STORE_OPCODE is the store the result feeds into; only full-word and
 * i4/i8 stores are eligible. Returns the fused *_MEMBASE_REG/_IMM opcode.
 */
13275 op_to_op_dest_membase (int store_opcode, int opcode)
13277 #if defined(TARGET_X86)
/* on x86 only 32 bit (native word) stores can be fused */
13278 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* reg forms */
13283 return OP_X86_ADD_MEMBASE_REG;
13285 return OP_X86_SUB_MEMBASE_REG;
13287 return OP_X86_AND_MEMBASE_REG;
13289 return OP_X86_OR_MEMBASE_REG;
13291 return OP_X86_XOR_MEMBASE_REG;
/* imm forms */
13294 return OP_X86_ADD_MEMBASE_IMM;
13297 return OP_X86_SUB_MEMBASE_IMM;
13300 return OP_X86_AND_MEMBASE_IMM;
13303 return OP_X86_OR_MEMBASE_IMM;
13306 return OP_X86_XOR_MEMBASE_IMM;
13312 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64 bit stores to be fused */
13313 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* fused opcodes */
13318 return OP_X86_ADD_MEMBASE_REG;
13320 return OP_X86_SUB_MEMBASE_REG;
13322 return OP_X86_AND_MEMBASE_REG;
13324 return OP_X86_OR_MEMBASE_REG;
13326 return OP_X86_XOR_MEMBASE_REG;
13328 return OP_X86_ADD_MEMBASE_IMM;
13330 return OP_X86_SUB_MEMBASE_IMM;
13332 return OP_X86_AND_MEMBASE_IMM;
13334 return OP_X86_OR_MEMBASE_IMM;
13336 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops get the AMD64_* fused opcodes */
13338 return OP_AMD64_ADD_MEMBASE_REG;
13340 return OP_AMD64_SUB_MEMBASE_REG;
13342 return OP_AMD64_AND_MEMBASE_REG;
13344 return OP_AMD64_OR_MEMBASE_REG;
13346 return OP_AMD64_XOR_MEMBASE_REG;
13349 return OP_AMD64_ADD_MEMBASE_IMM;
13352 return OP_AMD64_SUB_MEMBASE_IMM;
13355 return OP_AMD64_AND_MEMBASE_IMM;
13358 return OP_AMD64_OR_MEMBASE_IMM;
13361 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style result with the byte store that consumes it into a
 * single SETcc-to-memory instruction (x86/amd64 only). Only applies when
 * the result is stored with OP_STOREI1_MEMBASE_REG.
 */
13371 op_to_op_store_membase (int store_opcode, int opcode)
13373 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13376 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13377 return OP_X86_SETEQ_MEMBASE;
13379 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13380 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source operand of OPCODE into the
 * instruction itself, producing a reg<-op-memory form (x86/amd64 only).
 * LOAD_OPCODE is the load that produced sreg1; only loads whose size
 * matches the operation are eligible.
 */
13388 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13391 /* FIXME: This has sign extension issues */
13393 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13394 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only native-word/i4/u4 loads can be folded */
13397 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13402 return OP_X86_PUSH_MEMBASE;
13403 case OP_COMPARE_IMM:
13404 case OP_ICOMPARE_IMM:
13405 return OP_X86_COMPARE_MEMBASE_IMM;
13408 return OP_X86_COMPARE_MEMBASE_REG;
13412 #ifdef TARGET_AMD64
13413 /* FIXME: This has sign extension issues */
13415 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13416 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push of a full 64 bit word (ilp32 uses 32 bit pointers, so exclude it) */
13421 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13422 return OP_X86_PUSH_MEMBASE;
/* disabled: imm compares against memory; see the embedded FIXME */
13424 /* FIXME: This only works for 32 bit immediates
13425 case OP_COMPARE_IMM:
13426 case OP_LCOMPARE_IMM:
13427 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13428 return OP_AMD64_COMPARE_MEMBASE_IMM;
13430 case OP_ICOMPARE_IMM:
13431 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13432 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* pick the 32 bit or 64 bit compare depending on the load width */
13436 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13437 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13438 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13439 return OP_AMD64_COMPARE_MEMBASE_REG;
13442 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13443 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the SECOND source operand of OPCODE into the
 * instruction itself (reg-op-memory form, x86/amd64 only). LOAD_OPCODE
 * is the load that produced sreg2; the load width selects between the
 * 32 bit (X86_*/AMD64_I*) and 64 bit (AMD64_*) fused opcodes.
 */
13452 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13455 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13461 return OP_X86_COMPARE_REG_MEMBASE;
13463 return OP_X86_ADD_REG_MEMBASE;
13465 return OP_X86_SUB_REG_MEMBASE;
13467 return OP_X86_AND_REG_MEMBASE;
13469 return OP_X86_OR_REG_MEMBASE;
13471 return OP_X86_XOR_REG_MEMBASE;
13475 #ifdef TARGET_AMD64
/* 32 bit operand loads (or native-word loads under ilp32) */
13476 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13479 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13481 return OP_X86_ADD_REG_MEMBASE;
13483 return OP_X86_SUB_REG_MEMBASE;
13485 return OP_X86_AND_REG_MEMBASE;
13487 return OP_X86_OR_REG_MEMBASE;
13489 return OP_X86_XOR_REG_MEMBASE;
/* full 64 bit operand loads */
13491 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13495 return OP_AMD64_COMPARE_REG_MEMBASE;
13497 return OP_AMD64_ADD_REG_MEMBASE;
13499 return OP_AMD64_SUB_REG_MEMBASE;
13501 return OP_AMD64_AND_REG_MEMBASE;
13503 return OP_AMD64_OR_REG_MEMBASE;
13505 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which are
 * software-emulated on the current architecture (long shifts on 32 bit
 * registers, and mul/div/rem where the arch emulates them), since the
 * emulation path cannot take an immediate operand.
 */
13514 mono_op_to_op_imm_noemul (int opcode)
13517 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13523 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13530 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* not emulated here: safe to use the immediate form */
13535 return mono_op_to_op_imm (opcode);
13540 * mono_handle_global_vregs:
13542 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13546 mono_handle_global_vregs (MonoCompile *cfg)
13548 gint32 *vreg_to_bb;
13549 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, (block_num + 1) = seen in exactly
 * one bblock, -1 = seen in more than one bblock.
 * NOTE(review): the element size here is sizeof (gint32*) although the
 * array is gint32 — over-allocates on 64 bit; harmless but looks like it
 * was meant to be sizeof (gint32). Confirm before changing.
 */
13552 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13554 #ifdef MONO_ARCH_SIMD_INTRINSICS
13555 if (cfg->uses_simd_intrinsics)
13556 mono_simd_simplify_indirection (cfg);
13559 /* Find local vregs used in more than one bb */
13560 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13561 MonoInst *ins = bb->code;
13562 int block_num = bb->block_num;
13564 if (cfg->verbose_level > 2)
13565 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13568 for (; ins; ins = ins->next) {
13569 const char *spec = INS_INFO (ins->opcode);
13570 int regtype = 0, regindex;
13573 if (G_UNLIKELY (cfg->verbose_level > 2))
13574 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine ops */
13576 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
13578 for (regindex = 0; regindex < 4; regindex ++) {
13581 if (regindex == 0) {
13582 regtype = spec [MONO_INST_DEST];
13583 if (regtype == ' ')
13586 } else if (regindex == 1) {
13587 regtype = spec [MONO_INST_SRC1];
13588 if (regtype == ' ')
13591 } else if (regindex == 2) {
13592 regtype = spec [MONO_INST_SRC2];
13593 if (regtype == ' ')
13596 } else if (regindex == 3) {
13597 regtype = spec [MONO_INST_SRC3];
13598 if (regtype == ' ')
13603 #if SIZEOF_REGISTER == 4
13604 /* In the LLVM case, the long opcodes are not decomposed */
13605 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13607 * Since some instructions reference the original long vreg,
13608 * and some reference the two component vregs, it is quite hard
13609 * to determine when it needs to be global. So be conservative.
13611 if (!get_vreg_to_inst (cfg, vreg)) {
13612 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13614 if (cfg->verbose_level > 2)
13615 printf ("LONG VREG R%d made global.\n", vreg);
13619 * Make the component vregs volatile since the optimizations can
13620 * get confused otherwise.
13622 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13623 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13627 g_assert (vreg != -1);
13629 prev_bb = vreg_to_bb [vreg];
13630 if (prev_bb == 0) {
13631 /* 0 is a valid block num */
13632 vreg_to_bb [vreg] = block_num + 1;
13633 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are global by construction; skip them */
13634 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* first time the vreg is seen in a second bblock: promote it */
13637 if (!get_vreg_to_inst (cfg, vreg)) {
13638 if (G_UNLIKELY (cfg->verbose_level > 2))
13639 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick a variable type matching the regtype for the new global var */
13643 if (vreg_is_ref (cfg, vreg))
13644 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13646 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13649 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13652 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13656 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13659 g_assert_not_reached ();
13663 /* Flag as having been used in more than one bb */
13664 vreg_to_bb [vreg] = -1;
13670 /* If a variable is used in only one bblock, convert it into a local vreg */
13671 for (i = 0; i < cfg->num_varinfo; i++) {
13672 MonoInst *var = cfg->varinfo [i];
13673 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13675 switch (var->type) {
13681 #if SIZEOF_REGISTER == 8
13684 #if !defined(TARGET_X86)
13685 /* Enabling this screws up the fp stack on x86 */
13688 if (mono_arch_is_soft_float ())
13692 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13696 /* Arguments are implicitly global */
13697 /* Putting R4 vars into registers doesn't work currently */
13698 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13699 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13701 * Make that the variable's liveness interval doesn't contain a call, since
13702 * that would cause the lvreg to be spilled, making the whole optimization
13705 /* This is too slow for JIT compilation */
13707 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13709 int def_index, call_index, ins_index;
13710 gboolean spilled = FALSE;
13715 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13716 const char *spec = INS_INFO (ins->opcode);
13718 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13719 def_index = ins_index;
/*
 * NOTE(review): the second clause below repeats the SRC1/sreg1 test —
 * it almost certainly was meant to check SRC2/sreg2, so uses of the
 * var via sreg2 are currently not considered. Confirm against
 * upstream history before fixing.
 */
13721 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13722 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13723 if (call_index > def_index) {
13729 if (MONO_IS_CALL (ins))
13730 call_index = ins_index;
/* demote: variable becomes a plain local vreg, drop the var entry */
13740 if (G_UNLIKELY (cfg->verbose_level > 2))
13741 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13742 var->flags |= MONO_INST_IS_DEAD;
13743 cfg->vreg_to_inst [var->dreg] = NULL;
13750 * Compress the varinfo and vars tables so the liveness computation is faster and
13751 * takes up less space.
13754 for (i = 0; i < cfg->num_varinfo; ++i) {
13755 MonoInst *var = cfg->varinfo [i];
13756 if (pos < i && cfg->locals_start == i)
13757 cfg->locals_start = pos;
13758 if (!(var->flags & MONO_INST_IS_DEAD)) {
13760 cfg->varinfo [pos] = cfg->varinfo [i];
13761 cfg->varinfo [pos]->inst_c0 = pos;
13762 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13763 cfg->vars [pos].idx = pos;
13764 #if SIZEOF_REGISTER == 4
13765 if (cfg->varinfo [pos]->type == STACK_I8) {
13766 /* Modify the two component vars too */
13769 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13770 var1->inst_c0 = pos;
13771 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13772 var1->inst_c0 = pos;
13779 cfg->num_varinfo = pos;
13780 if (cfg->locals_start > cfg->num_varinfo)
13781 cfg->locals_start = cfg->num_varinfo;
13785 * mono_allocate_gsharedvt_vars:
13787 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13788 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13791 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* map: vreg -> (slot index + 1) for locals, -1 for arguments, 0 = not gsharedvt */
13795 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13797 for (i = 0; i < cfg->num_varinfo; ++i) {
13798 MonoInst *ins = cfg->varinfo [i];
13801 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* vars at or past locals_start are IL locals; earlier ones are arguments */
13802 if (i >= cfg->locals_start) {
13804 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* store idx + 1 so that 0 can mean 'no slot' */
13805 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13806 ins->opcode = OP_GSHAREDVT_LOCAL;
13807 ins->inst_imm = idx;
/* arguments live at a register-relative offset, not in the entries array */
13810 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13811 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13818 * mono_spill_global_vars:
13820 * Generate spill code for variables which are not allocated to registers,
13821 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13822 * code is generated which could be optimized by the local optimization passes.
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	int orig_next_vreg;
	/* vreg -> currently cached lvreg (0 = none); indexed up to cfg->next_vreg */
	guint32 *vreg_to_lvreg;
	guint32 i, lvregs_len, lvregs_size;
	gboolean dest_has_lvreg = FALSE;
	/* Maps an ins-spec register type char ('i'/'l'/'f'/'x') to the stack type passed to alloc_dreg () */
	MonoStackType stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	/* spec2 is a scratch ins spec used below when a store's dreg/sreg2 are swapped */
	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */

	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
				g_assert (ins->opcode == OP_REGOFFSET);

				/* Describe the two word-sized halves of the 64 bit value as stack slots */
				tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;

	if (cfg->compute_gc_maps) {
		/* registers need liveness info even for !non refs */
		for (i = 0; i < cfg->num_varinfo; i++) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_REGVAR)
				ins->flags |= MONO_INST_GC_TRACK;

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs_size = 1024;
	lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variable's location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;

		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = (MonoInst *)ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
					/* gsharedvt arg passed by ref */
					g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);

					ins->opcode = OP_LOAD_MEMBASE;
					ins->inst_basereg = var->inst_basereg;
					ins->inst_offset = var->inst_offset;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
					MonoInst *load, *load2, *load3;
					/* The stored index is offset by 1 so 0 can mean "no entry" */
					int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
					int reg1, reg2, reg3;
					MonoInst *info_var = cfg->gsharedvt_info_var;
					MonoInst *locals_var = cfg->gsharedvt_locals_var;

					/*
					 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
					 */

					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);

					g_assert (info_var);
					g_assert (locals_var);

					/* Mark the instruction used to compute the locals var as used */
					cfg->gsharedvt_locals_var_ins = NULL;

					/* Load the offset */
					if (info_var->opcode == OP_REGOFFSET) {
						reg1 = alloc_ireg (cfg);
						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
					} else if (info_var->opcode == OP_REGVAR) {
						reg1 = info_var->dreg;
						g_assert_not_reached ();
					reg2 = alloc_ireg (cfg);
					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
					/* Load the locals area address */
					reg3 = alloc_ireg (cfg);
					if (locals_var->opcode == OP_REGOFFSET) {
						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
					} else if (locals_var->opcode == OP_REGVAR) {
						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
						g_assert_not_reached ();
					/* Compute the address */
					ins->opcode = OP_PADD;

					mono_bblock_insert_before_ins (bb, ins, load3);
					mono_bblock_insert_before_ins (bb, load3, load2);
					mono_bblock_insert_before_ins (bb, load2, load);
					/* Plain stack variable: the address is basereg + offset */
					g_assert (var->opcode == OP_REGOFFSET);

					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* CIL-level opcodes should all have been lowered by this pass */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

				/* Build a spec matching the swapped registers */
				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);

			/* Process the destination register: insert/fuse a spill store if needed */
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hreg: just redirect the dreg */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						spec = INS_INFO (ins->opcode);
					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						store_opcode = OP_STOREI8_MEMBASE_REG;

					/* Redirect the definition into a fresh lvreg, spilled below */
					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

#if SIZEOF_REGISTER != 8
					if (regtype == 'l') {
						/* 64 bit value on a 32 bit target: spill both word halves */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;

						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							/* Constant definition: turn it into a store-immediate */
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
							/* Move into the variable: turn the move itself into the store */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 */
							if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

				/* Record the first definition as the start of the live range */
				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);

			/* Process the source registers: insert/fuse spill loads as needed */
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hreg: just redirect the sreg */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;

						//printf ("%d ", srcindex); mono_print_ins (ins);

						sreg = alloc_dreg (cfg, stacktypes [regtype]);

						/* Cache the lvreg so later uses of this vreg can reuse it */
						if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
							if (var->dreg == prev_dreg) {
								/*
								 * sreg refers to the value loaded by the load
								 * emitted below, but we need to use ins->dreg
								 * since it refers to the store emitted earlier.
								 */
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								/* Grow the lvregs array if full */
								if (lvregs_len >= lvregs_size) {
									guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
									memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
									lvregs = new_lvregs;
								lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

#if SIZEOF_REGISTER != 8
						if (regtype == 'l') {
							/* 64 bit value on a 32 bit target: load both word halves */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
			mono_inst_set_src_registers (ins, sregs);

			/* Deferred from the dreg processing above: the sregs are done now */
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				/* Grow the lvregs array if full */
				if (lvregs_len >= lvregs_size) {
					guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
					memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
					lvregs = new_lvregs;
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				/* NOTE(review): presumably because cached lvregs don't survive a call — confirm */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
			} else if (ins->opcode == OP_NOP) {
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);

	if (cfg->gsharedvt_locals_var_ins) {
		/* Nullify if unused */
		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
		cfg->gsharedvt_locals_var_ins->inst_imm = 0;

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
14437 * - use 'iadd' instead of 'int_add'
14438 * - handling ovf opcodes: decompose in method_to_ir.
14439 * - unify iregs/fregs
14440 * -> partly done, the missing parts are:
14441 * - a more complete unification would involve unifying the hregs as well, so
14442 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14443 * would no longer map to the machine hregs, so the code generators would need to
14444 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14445 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14446 * fp/non-fp branches speeds it up by about 15%.
14447 * - use sext/zext opcodes instead of shifts
14449 * - get rid of TEMPLOADs if possible and use vregs instead
14450 * - clean up usage of OP_P/OP_ opcodes
14451 * - cleanup usage of DUMMY_USE
14452 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14454 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14455 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14456 * - make sure handle_stack_args () is called before the branch is emitted
14457 * - when the new IR is done, get rid of all unused stuff
14458 * - COMPARE/BEQ as separate instructions or unify them ?
14459 * - keeping them separate allows specialized compare instructions like
14460 * compare_imm, compare_membase
14461 * - most back ends unify fp compare+branch, fp compare+ceq
14462 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14464 * - handle long shift opts on 32 bit platforms somehow: they require
14465 * 3 sregs (2 for arg1 and 1 for arg2)
14466 * - make byref a 'normal' type.
14467 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14468 * variable if needed.
14469 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14470 * like inline_method.
14471 * - remove inlining restrictions
14472 * - fix LNEG and enable cfold of INEG
14473 * - generalize x86 optimizations like ldelema as a peephole optimization
14474 * - add store_mem_imm for amd64
14475 * - optimize the loading of the interruption flag in the managed->native wrappers
14476 * - avoid special handling of OP_NOP in passes
14477 * - move code inserting instructions into one function/macro.
14478 * - try a coalescing phase after liveness analysis
14479 * - add float -> vreg conversion + local optimizations on !x86
14480 * - figure out how to handle decomposed branches during optimizations, ie.
14481 * compare+branch, op_jump_table+op_br etc.
14482 * - promote RuntimeXHandles to vregs
14483 * - vtype cleanups:
14484 * - add a NEW_VARLOADA_VREG macro
14485 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14486 * accessing vtype fields.
14487 * - get rid of I8CONST on 64 bit platforms
14488 * - dealing with the increase in code size due to branches created during opcode
14490 * - use extended basic blocks
14491 * - all parts of the JIT
14492 * - handle_global_vregs () && local regalloc
14493 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14494 * - sources of increase in code size:
14497 * - isinst and castclass
14498 * - lvregs not allocated to global registers even if used multiple times
14499 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14501 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14502 * - add all micro optimizations from the old JIT
14503 * - put tree optimizations into the deadce pass
14504 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14505 * specific function.
14506 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14507 * fcompare + branchCC.
14508 * - create a helper function for allocating a stack slot, taking into account
14509 * MONO_CFG_HAS_SPILLUP.
14511 * - merge the ia64 switch changes.
14512 * - optimize mono_regstate2_alloc_int/float.
14513 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14514 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14515 * parts of the tree could be separated by other instructions, killing the tree
14516 * arguments, or stores killing loads etc. Also, should we fold loads into other
14517 * instructions if the result of the load is used multiple times ?
14518 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14519 * - LAST MERGE: 108395.
14520 * - when returning vtypes in registers, generate IR and append it to the end of the
14521 * last bb instead of doing it in the epilog.
14522 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14530 - When to decompose opcodes:
14531 - earlier: this makes some optimizations hard to implement, since the low level IR
   no longer contains the necessary information. But it is easier to do.
14533 - later: harder to implement, enables more optimizations.
14534 - Branches inside bblocks:
14535 - created when decomposing complex opcodes.
14536 - branches to another bblock: harmless, but not tracked by the branch
14537 optimizations, so need to branch to a label at the start of the bblock.
14538 - branches to inside the same bblock: very problematic, trips up the local
   reg allocator. Can be fixed by splitting the current bblock, but that is a
14540 complex operation, since some local vregs can become global vregs etc.
14541 - Local/global vregs:
14542 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14543 local register allocator.
14544 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14545 structure, created by mono_create_var (). Assigned to hregs or the stack by
14546 the global register allocator.
14547 - When to do optimizations like alu->alu_imm:
14548 - earlier -> saves work later on since the IR will be smaller/simpler
14549 - later -> can work on more instructions
14550 - Handling of valuetypes:
14551 - When a vtype is pushed on the stack, a new temporary is created, an
14552 instruction computing its address (LDADDR) is emitted and pushed on
14553 the stack. Need to optimize cases when the vtype is used immediately as in
14554 argument passing, stloc etc.
14555 - Instead of the to_end stuff in the old JIT, simply call the function handling
14556 the values on the stack before emitting the last instruction of the bb.
14559 #else /* !DISABLE_JIT */
14562 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14566 #endif /* !DISABLE_JIT */